2024-12-01 23:42:59,431 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-01 23:42:59,443 main DEBUG Took 0.009716 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-01 23:42:59,443 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-01 23:42:59,444 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-01 23:42:59,445 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-01 23:42:59,446 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,460 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-01 23:42:59,471 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,473 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,474 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,474 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,475 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,475 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,476 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,477 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,477 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,478 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,479 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,479 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,480 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,480 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-01 23:42:59,481 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,481 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,482 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,482 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,483 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,483 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,484 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,484 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,485 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,485 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:42:59,486 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,486 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-01 23:42:59,488 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:42:59,490 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-01 23:42:59,492 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-01 23:42:59,492 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-01 23:42:59,494 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-01 23:42:59,494 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-01 23:42:59,505 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-01 23:42:59,508 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-01 23:42:59,510 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-01 23:42:59,510 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-01 23:42:59,510 main DEBUG createAppenders(={Console}) 2024-12-01 23:42:59,511 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-01 23:42:59,512 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-01 23:42:59,512 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-01 23:42:59,513 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-01 23:42:59,513 main DEBUG OutputStream closed 2024-12-01 23:42:59,513 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-01 23:42:59,514 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-01 23:42:59,514 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-01 23:42:59,587 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-01 23:42:59,590 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-01 23:42:59,591 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-01 23:42:59,592 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-01 23:42:59,593 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-01 23:42:59,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-01 23:42:59,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-01 23:42:59,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-01 23:42:59,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-01 23:42:59,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-01 23:42:59,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-01 23:42:59,597 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-01 23:42:59,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-01 23:42:59,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-01 23:42:59,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-01 23:42:59,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-01 23:42:59,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-01 23:42:59,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-01 23:42:59,601 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-01 23:42:59,601 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-01 23:42:59,602 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-01 23:42:59,603 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-01T23:42:59,618 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-01 23:42:59,621 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-01 23:42:59,622 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
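The HBaseClassTestRule entry above is emitted by the class-level JUnit rule that enforces the 26-minute timeout for TestHBaseWalOnEC. A minimal sketch of how HBase test classes usually declare that rule (illustrative only, not the actual test source):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.junit.ClassRule;

public class TestHBaseWalOnEC {
  // HBaseClassTestRule derives the per-class timeout (26 minutes here) from the
  // test's size category and fails the class if it runs longer.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestHBaseWalOnEC.class);

  // ... test methods such as testReadWrite() would follow ...
}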
2024-12-01T23:42:59,841 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841 2024-12-01T23:42:59,865 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2, deleteOnExit=true 2024-12-01T23:42:59,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/test.cache.data in system properties and HBase conf 2024-12-01T23:42:59,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T23:42:59,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir in system properties and HBase conf 2024-12-01T23:42:59,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T23:42:59,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T23:42:59,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T23:42:59,970 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-01T23:43:00,064 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T23:43:00,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T23:43:00,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T23:43:00,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T23:43:00,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T23:43:00,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T23:43:00,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T23:43:00,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T23:43:00,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T23:43:00,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T23:43:00,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/nfs.dump.dir in system properties and HBase conf 2024-12-01T23:43:00,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/java.io.tmpdir in system properties and HBase conf 2024-12-01T23:43:00,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T23:43:00,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T23:43:00,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T23:43:01,025 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-01T23:43:01,128 INFO [Time-limited test {}] log.Log(170): Logging initialized @2361ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-01T23:43:01,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:01,274 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:01,293 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:01,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:01,295 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T23:43:01,307 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:01,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:01,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:01,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/java.io.tmpdir/jetty-localhost-36309-hadoop-hdfs-3_4_1-tests_jar-_-any-7063895066764072236/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T23:43:01,548 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:36309} 2024-12-01T23:43:01,548 INFO [Time-limited test {}] server.Server(415): Started @2782ms 2024-12-01T23:43:02,056 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:02,063 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:02,065 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:02,065 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:02,065 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T23:43:02,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32fec40a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:02,066 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17f1c7fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:02,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e705dc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/java.io.tmpdir/jetty-localhost-41179-hadoop-hdfs-3_4_1-tests_jar-_-any-13277822393114577248/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:02,162 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@2ad1569e{HTTP/1.1, (http/1.1)}{localhost:41179} 2024-12-01T23:43:02,162 INFO [Time-limited test {}] server.Server(415): Started @3396ms 2024-12-01T23:43:02,210 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T23:43:02,314 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:02,324 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:02,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:02,325 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:02,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T23:43:02,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a91ec1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:02,328 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c2c5be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:02,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26b068f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/java.io.tmpdir/jetty-localhost-35111-hadoop-hdfs-3_4_1-tests_jar-_-any-3338300197526890454/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:02,465 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5739b847{HTTP/1.1, (http/1.1)}{localhost:35111} 2024-12-01T23:43:02,465 INFO [Time-limited test {}] server.Server(415): Started @3699ms 2024-12-01T23:43:02,474 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T23:43:02,536 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:02,545 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:02,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:02,548 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:02,548 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T23:43:02,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e1f796{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:02,549 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1023f385{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:02,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f750918{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/java.io.tmpdir/jetty-localhost-34327-hadoop-hdfs-3_4_1-tests_jar-_-any-4127232867756181773/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:02,656 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@86bf2a7{HTTP/1.1, (http/1.1)}{localhost:34327} 2024-12-01T23:43:02,656 INFO [Time-limited test {}] server.Server(415): Started @3891ms 2024-12-01T23:43:02,659 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
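The Jetty servers started above are the web endpoints of the embedded NameNode and its three DataNodes backing this test run. A minimal sketch, for illustration only, of standing up an equivalent three-DataNode mini HDFS cluster with Hadoop's test harness (in this run HBaseTestingUtil manages the cluster itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start a local NameNode plus three DataNodes, like the ones logged above.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .build();
    cluster.waitActive();                      // wait for the DataNodes to register
    FileSystem fs = cluster.getFileSystem();   // HDFS client bound to the mini cluster
    System.out.println("Mini DFS running at " + fs.getUri());
    cluster.shutdown();
  }
}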
2024-12-01T23:43:03,404 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data1/current/BP-1127404106-172.17.0.2-1733096580555/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:03,404 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data4/current/BP-1127404106-172.17.0.2-1733096580555/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:03,404 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data2/current/BP-1127404106-172.17.0.2-1733096580555/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:03,404 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data3/current/BP-1127404106-172.17.0.2-1733096580555/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:03,436 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T23:43:03,436 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T23:43:03,480 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1027429d9e0d79d7 with lease ID 0xbf19b66ae81caa7b: Processing first storage report for DS-953f7562-4df4-400e-bdcd-239865113ecf from datanode DatanodeRegistration(127.0.0.1:38241, datanodeUuid=9928f13c-27b3-4c1e-a9b5-bfdb63528212, infoPort=36853, infoSecurePort=0, ipcPort=46729, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555) 2024-12-01T23:43:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1027429d9e0d79d7 with lease ID 0xbf19b66ae81caa7b: from storage DS-953f7562-4df4-400e-bdcd-239865113ecf node DatanodeRegistration(127.0.0.1:38241, datanodeUuid=9928f13c-27b3-4c1e-a9b5-bfdb63528212, infoPort=36853, infoSecurePort=0, ipcPort=46729, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T23:43:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6905d74b0039051 with lease ID 0xbf19b66ae81caa7c: Processing first storage report for DS-1b696df6-8425-448a-a2a3-174bd4f52d90 from datanode DatanodeRegistration(127.0.0.1:46667, datanodeUuid=b762d51f-4bfb-42cc-b5ba-48b4a0f2ecfa, infoPort=43795, infoSecurePort=0, ipcPort=37577, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555) 2024-12-01T23:43:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6905d74b0039051 with lease ID 0xbf19b66ae81caa7c: from storage DS-1b696df6-8425-448a-a2a3-174bd4f52d90 node DatanodeRegistration(127.0.0.1:46667, datanodeUuid=b762d51f-4bfb-42cc-b5ba-48b4a0f2ecfa, infoPort=43795, infoSecurePort=0, ipcPort=37577, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1027429d9e0d79d7 with lease ID 0xbf19b66ae81caa7b: Processing first storage report for DS-11f3b69a-c4a9-425e-8217-ae35f910bc94 from datanode DatanodeRegistration(127.0.0.1:38241, datanodeUuid=9928f13c-27b3-4c1e-a9b5-bfdb63528212, infoPort=36853, infoSecurePort=0, ipcPort=46729, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555) 2024-12-01T23:43:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1027429d9e0d79d7 with lease ID 0xbf19b66ae81caa7b: from storage DS-11f3b69a-c4a9-425e-8217-ae35f910bc94 node DatanodeRegistration(127.0.0.1:38241, datanodeUuid=9928f13c-27b3-4c1e-a9b5-bfdb63528212, infoPort=36853, infoSecurePort=0, ipcPort=46729, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:03,482 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6905d74b0039051 with lease ID 0xbf19b66ae81caa7c: Processing first storage report for DS-91e9e615-edde-4747-a9ad-06fbd6af2528 from datanode DatanodeRegistration(127.0.0.1:46667, datanodeUuid=b762d51f-4bfb-42cc-b5ba-48b4a0f2ecfa, infoPort=43795, infoSecurePort=0, ipcPort=37577, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555) 2024-12-01T23:43:03,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xe6905d74b0039051 with lease ID 0xbf19b66ae81caa7c: from storage DS-91e9e615-edde-4747-a9ad-06fbd6af2528 node DatanodeRegistration(127.0.0.1:46667, datanodeUuid=b762d51f-4bfb-42cc-b5ba-48b4a0f2ecfa, infoPort=43795, infoSecurePort=0, ipcPort=37577, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:03,529 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data6/current/BP-1127404106-172.17.0.2-1733096580555/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:03,529 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data5/current/BP-1127404106-172.17.0.2-1733096580555/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:03,549 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T23:43:03,553 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6285189b44ab50e with lease ID 0xbf19b66ae81caa7d: Processing first storage report for DS-9f48e73b-a700-4e5f-b82f-7578e41db98a from datanode DatanodeRegistration(127.0.0.1:35045, datanodeUuid=de8f40b9-0782-4848-bddd-65d491feb1e8, infoPort=40467, infoSecurePort=0, ipcPort=34397, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555) 2024-12-01T23:43:03,553 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6285189b44ab50e with lease ID 0xbf19b66ae81caa7d: from storage DS-9f48e73b-a700-4e5f-b82f-7578e41db98a node DatanodeRegistration(127.0.0.1:35045, datanodeUuid=de8f40b9-0782-4848-bddd-65d491feb1e8, infoPort=40467, infoSecurePort=0, ipcPort=34397, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T23:43:03,553 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6285189b44ab50e with lease ID 0xbf19b66ae81caa7d: Processing first storage report for DS-d57b2491-8ca5-4f7a-bc01-05feeea637fd from datanode DatanodeRegistration(127.0.0.1:35045, datanodeUuid=de8f40b9-0782-4848-bddd-65d491feb1e8, infoPort=40467, infoSecurePort=0, ipcPort=34397, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555) 2024-12-01T23:43:03,554 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6285189b44ab50e with lease ID 0xbf19b66ae81caa7d: from storage DS-d57b2491-8ca5-4f7a-bc01-05feeea637fd node DatanodeRegistration(127.0.0.1:35045, datanodeUuid=de8f40b9-0782-4848-bddd-65d491feb1e8, infoPort=40467, infoSecurePort=0, ipcPort=34397, storageInfo=lv=-57;cid=testClusterID;nsid=125126367;c=1733096580555), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:03,660 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841 2024-12-01T23:43:03,719 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-12-01T23:43:03,764 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=205, ProcessCount=11, AvailableMemoryMB=7556 2024-12-01T23:43:03,767 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T23:43:03,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-01T23:43:03,845 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/zookeeper_0, clientPort=60672, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T23:43:03,854 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60672 2024-12-01T23:43:03,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:03,882 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:03,955 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:03,956 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
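The StartMiniClusterOption printed above records the requested topology: one master, three region servers, three DataNodes, one ZooKeeper server. A hedged sketch of how a test asks the testing utility for that layout, using the stable HBaseTestingUtility/StartMiniClusterOption API (the HBaseTestingUtil seen in this log is its master-branch successor, where exact type names may differ):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirrors the option printed in the log: 1 master, 3 region servers, 3 DataNodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // also starts ZooKeeper and DFS unless already running
    try {
      // ... interact with util.getConnection() / util.getAdmin() here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}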
2024-12-01T23:43:04,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:47230 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:35045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47230 dst: /127.0.0.1:35045
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T23:43:04,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-01T23:43:04,422 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
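The DFSStripedOutputStream warnings and the DataXceiver error above are consistent with writing under the RS-3-2-1024k erasure coding policy on a cluster with fewer than five DataNodes: the three data blocks can be placed but parity blocks 3 and 4 cannot, which is why the preceding warning points at 'hdfs ec -verifyClusterSetup'. A hedged sketch (illustrative paths, not part of the test) of inspecting and applying that policy through Hadoop 3's DistributedFileSystem API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // assumes fs.defaultFS points at the test HDFS
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data");    // hypothetical directory
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");     // enable the built-in policy cluster-wide
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");   // new files under dir are striped 3 data + 2 parity
      System.out.println("Policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
    }
  }
}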
2024-12-01T23:43:04,433 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d with version=8 2024-12-01T23:43:04,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/hbase-staging 2024-12-01T23:43:04,511 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-01T23:43:04,747 INFO [Time-limited test {}] client.ConnectionUtils(128): master/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:04,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:04,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:04,760 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:04,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:04,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:04,875 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-01T23:43:04,929 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-01T23:43:04,937 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-01T23:43:04,940 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:04,962 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 17796 (auto-detected) 2024-12-01T23:43:04,963 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-01T23:43:04,980 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46325 2024-12-01T23:43:04,999 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46325 connecting to ZooKeeper ensemble=127.0.0.1:60672 2024-12-01T23:43:05,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:463250x0, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:05,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46325-0x1019490c5560000 connected 2024-12-01T23:43:05,175 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,192 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:05,196 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d, hbase.cluster.distributed=false 2024-12-01T23:43:05,222 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:05,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46325 2024-12-01T23:43:05,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46325 2024-12-01T23:43:05,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46325 2024-12-01T23:43:05,227 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46325 2024-12-01T23:43:05,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46325 2024-12-01T23:43:05,314 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:05,316 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,316 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,316 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:05,316 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,316 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:05,319 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:43:05,320 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:05,321 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42367 2024-12-01T23:43:05,322 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42367 connecting to ZooKeeper ensemble=127.0.0.1:60672 2024-12-01T23:43:05,323 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423670x0, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:05,358 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42367-0x1019490c5560001 connected 2024-12-01T23:43:05,358 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:05,363 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:43:05,372 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:43:05,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:43:05,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:05,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42367 2024-12-01T23:43:05,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42367 2024-12-01T23:43:05,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42367 2024-12-01T23:43:05,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42367 2024-12-01T23:43:05,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42367 2024-12-01T23:43:05,405 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:05,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,406 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,406 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:05,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,407 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:05,407 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:43:05,407 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:05,408 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42187 2024-12-01T23:43:05,411 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42187 connecting to ZooKeeper ensemble=127.0.0.1:60672 2024-12-01T23:43:05,412 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421870x0, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:05,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:421870x0, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:05,450 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42187-0x1019490c5560002 connected 2024-12-01T23:43:05,451 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:43:05,452 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:43:05,453 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:43:05,456 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:05,457 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42187 2024-12-01T23:43:05,458 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42187 2024-12-01T23:43:05,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42187 2024-12-01T23:43:05,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42187 2024-12-01T23:43:05,460 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42187 2024-12-01T23:43:05,473 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:05,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,474 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:05,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:05,474 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:05,474 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:43:05,475 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:05,475 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46719 2024-12-01T23:43:05,477 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46719 connecting to ZooKeeper ensemble=127.0.0.1:60672 2024-12-01T23:43:05,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467190x0, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:05,491 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46719-0x1019490c5560003 connected 2024-12-01T23:43:05,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:467190x0, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:05,492 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:43:05,493 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:43:05,494 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:43:05,497 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:05,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46719 2024-12-01T23:43:05,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46719 2024-12-01T23:43:05,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46719 2024-12-01T23:43:05,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46719 2024-12-01T23:43:05,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46719 2024-12-01T23:43:05,514 DEBUG [M:0;579b0f681375:46325 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;579b0f681375:46325 2024-12-01T23:43:05,515 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/579b0f681375,46325,1733096584597 2024-12-01T23:43:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,526 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/579b0f681375,46325,1733096584597 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, 
quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,550 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T23:43:05,551 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/579b0f681375,46325,1733096584597 from backup master directory 2024-12-01T23:43:05,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/579b0f681375,46325,1733096584597 2024-12-01T23:43:05,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:05,564 WARN [master/579b0f681375:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
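The records above show the coordination pattern each server follows: a watch is set on a znode that may not exist yet (/hbase/master, /hbase/running, /hbase/acl), and NodeCreated / NodeChildrenChanged / NodeDeleted events arrive once the active master registers itself and removes its backup-master entry. Below is a minimal sketch of that watch-before-create pattern using the plain ZooKeeper client API, reusing the ensemble address and znode path from the log; it is illustrative only and is not HBase's ZKWatcher/ZKUtil code.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchMasterZNode {
  public static void main(String[] args) throws Exception {
    // Same ensemble the test cluster reports in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60672", 30_000, event -> { });

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("type=" + event.getType()
            + " state=" + event.getState() + " path=" + event.getPath());

    // exists() registers the watch even when the znode is absent, which is the
    // "Set watcher on znode that does not yet exist" case logged by ZKUtil(113).
    zk.exists("/hbase/master", watcher);

    Thread.sleep(60_000); // keep the session open long enough to observe a NodeCreated event
    zk.close();
  }
}

A watch registered this way is a one-time trigger and has to be re-registered after it fires; HBase's ZKWatcher handles that re-registration internally, which is why the same event lines keep reappearing in the log.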
2024-12-01T23:43:05,565 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=579b0f681375,46325,1733096584597 2024-12-01T23:43:05,567 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-01T23:43:05,569 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-01T23:43:05,628 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/hbase.id] with ID: 74cac168-9f19-4415-b2ff-0996ffe9bfde 2024-12-01T23:43:05,628 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/.tmp/hbase.id 2024-12-01T23:43:05,635 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,635 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:49118 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49118 dst: /127.0.0.1:46667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T23:43:05,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-01T23:43:05,645 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:05,646 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/.tmp/hbase.id]:[hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/hbase.id] 2024-12-01T23:43:05,686 INFO [master/579b0f681375:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:05,691 INFO [master/579b0f681375:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T23:43:05,708 INFO [master/579b0f681375:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-01T23:43:05,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:05,746 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,747 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,749 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:47260 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:35045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47260 dst: /127.0.0.1:35045 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:05,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-01T23:43:05,756 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:05,770 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:43:05,771 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T23:43:05,776 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:43:05,804 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,804 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:32994 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:38241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32994 dst: /127.0.0.1:38241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-01T23:43:05,815 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
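The parity-block warnings above are consistent with an erasure coding policy that needs more datanodes than the mini cluster provides: RS-3-2-1024k stripes data across 3 data and 2 parity blocks, so a full block group wants 5 datanodes, and the striped writer logs the parity blocks it could not place (the warning itself points at 'hdfs ec -verifyClusterSetup' for checking this). Below is a hedged sketch for inspecting the effective EC policy of a directory through the HDFS client API; the namenode URI and path prefix come from the log, and everything else is illustrative rather than part of the test.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:40603"), conf);

    ErasureCodingPolicy policy =
        dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
    if (policy == null) {
      System.out.println("directory uses plain replication");
    } else {
      // RS-3-2-1024k: 3 data + 2 parity units, so a full stripe needs 5 datanodes.
      System.out.println(policy.getName() + " wants "
          + (policy.getNumDataUnits() + policy.getNumParityUnits()) + " datanodes");
    }
  }
}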
2024-12-01T23:43:05,830 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store 2024-12-01T23:43:05,849 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,849 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:05,852 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:33014 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33014 dst: /127.0.0.1:38241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:05,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-01T23:43:05,859 WARN [master/579b0f681375:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:05,863 INFO [master/579b0f681375:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-01T23:43:05,866 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:05,867 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T23:43:05,867 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:05,868 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:05,869 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T23:43:05,869 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:05,869 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
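The master:store descriptor logged above enumerates four column families (info, proc, rs, state) with their versions, bloom filter type, data block encoding, block size and in-memory flag. As a rough sketch, here is how the 'info' family from that line could be expressed with the HBase client descriptor builders; the values are copied from the log, but the real descriptor is assembled internally by the master region code, not by anything a test would write.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' family as reported in the log: 3 versions, ROWCOL bloom filter,
    // ROW_INDEX_V1 block encoding, 8 KB blocks, kept in memory.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setInMemory(true)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}

The proc, rs and state families in the same log line differ only in keeping a single version, using ROW bloom filters, no block encoding and 64 KB blocks.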
2024-12-01T23:43:05,870 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733096585867Disabling compacts and flushes for region at 1733096585867Disabling writes for close at 1733096585869 (+2 ms)Writing region close event to WAL at 1733096585869Closed at 1733096585869 2024-12-01T23:43:05,873 WARN [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/.initializing 2024-12-01T23:43:05,873 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/WALs/579b0f681375,46325,1733096584597 2024-12-01T23:43:05,881 INFO [master/579b0f681375:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:43:05,897 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C46325%2C1733096584597, suffix=, logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/WALs/579b0f681375,46325,1733096584597, archiveDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/oldWALs, maxLogs=10 2024-12-01T23:43:05,924 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/WALs/579b0f681375,46325,1733096584597/579b0f681375%2C46325%2C1733096584597.1733096585901, exclude list is [], retry=0 2024-12-01T23:43:05,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:05,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46667,DS-1b696df6-8425-448a-a2a3-174bd4f52d90,DISK] 2024-12-01T23:43:05,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35045,DS-9f48e73b-a700-4e5f-b82f-7578e41db98a,DISK] 2024-12-01T23:43:05,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38241,DS-953f7562-4df4-400e-bdcd-239865113ecf,DISK] 2024-12-01T23:43:05,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-01T23:43:05,981 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/WALs/579b0f681375,46325,1733096584597/579b0f681375%2C46325%2C1733096584597.1733096585901 2024-12-01T23:43:05,983 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43795:43795),(127.0.0.1/127.0.0.1:36853:36853),(127.0.0.1/127.0.0.1:40467:40467)] 2024-12-01T23:43:05,983 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:43:05,983 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:05,986 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:05,987 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,019 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T23:43:06,046 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:06,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,052 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T23:43:06,052 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:06,053 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T23:43:06,056 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:06,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T23:43:06,060 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:06,062 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,065 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,066 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,071 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,071 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,074 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T23:43:06,077 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:06,083 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:43:06,084 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69602075, jitterRate=0.03715173900127411}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:43:06,090 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733096585998Initializing all the Stores at 1733096586000 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096586000Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096586001 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096586001Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096586001Cleaning up temporary data from old regions at 1733096586071 (+70 ms)Region opened successfully at 1733096586090 (+19 ms) 2024-12-01T23:43:06,091 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T23:43:06,122 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4252edd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:06,148 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-01T23:43:06,157 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T23:43:06,158 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T23:43:06,160 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T23:43:06,161 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-01T23:43:06,167 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-01T23:43:06,167 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T23:43:06,193 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T23:43:06,204 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T23:43:06,232 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T23:43:06,236 INFO [master/579b0f681375:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T23:43:06,238 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T23:43:06,323 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T23:43:06,329 INFO [master/579b0f681375:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T23:43:06,336 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T23:43:06,377 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T23:43:06,380 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T23:43:06,397 DEBUG [master/579b0f681375:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T23:43:06,420 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T23:43:06,430 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T23:43:06,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:06,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:06,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:06,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:06,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,445 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=579b0f681375,46325,1733096584597, sessionid=0x1019490c5560000, setting cluster-up flag (Was=false) 2024-12-01T23:43:06,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-01T23:43:06,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-01T23:43:06,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-01T23:43:06,499 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T23:43:06,502 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=579b0f681375,46325,1733096584597 2024-12-01T23:43:06,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:06,548 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T23:43:06,550 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=579b0f681375,46325,1733096584597 2024-12-01T23:43:06,559 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T23:43:06,604 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(746): ClusterId : 74cac168-9f19-4415-b2ff-0996ffe9bfde 2024-12-01T23:43:06,604 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(746): ClusterId : 74cac168-9f19-4415-b2ff-0996ffe9bfde 2024-12-01T23:43:06,604 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(746): ClusterId : 74cac168-9f19-4415-b2ff-0996ffe9bfde 2024-12-01T23:43:06,606 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:43:06,606 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:43:06,606 DEBUG [RS:2;579b0f681375:46719 
{}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:43:06,625 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:06,634 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:43:06,634 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:43:06,634 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:43:06,634 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:43:06,634 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:43:06,634 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:43:06,635 INFO [master/579b0f681375:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T23:43:06,641 INFO [master/579b0f681375:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
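The StochasticLoadBalancer(272) line above reports its effective tunables (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A minimal sketch of overriding those values programmatically follows; the property names are the commonly used hbase.master.balancer.stochastic.* keys, which should be verified against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror what the balancer reported above; key names are assumptions
        // based on the usual StochasticLoadBalancer configuration keys.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }

In a test harness these values would normally be set before the mini-cluster (and thus the master) is started, since the balancer reads them once at load time.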
2024-12-01T23:43:06,642 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:43:06,642 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:43:06,642 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:43:06,642 DEBUG [RS:0;579b0f681375:42367 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26d0a63d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:06,642 DEBUG [RS:1;579b0f681375:42187 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e1f3106, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:06,642 DEBUG [RS:2;579b0f681375:46719 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71b261d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:06,647 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 579b0f681375,46325,1733096584597 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T23:43:06,655 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:06,655 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:06,655 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:06,655 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:06,655 DEBUG [RS:1;579b0f681375:42187 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;579b0f681375:42187 2024-12-01T23:43:06,655 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/579b0f681375:0, corePoolSize=10, maxPoolSize=10 2024-12-01T23:43:06,656 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,656 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:06,656 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,658 INFO [RS:1;579b0f681375:42187 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:43:06,658 INFO [RS:1;579b0f681375:42187 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:43:06,658 DEBUG [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T23:43:06,659 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733096616659 2024-12-01T23:43:06,660 DEBUG [RS:2;579b0f681375:46719 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;579b0f681375:46719 2024-12-01T23:43:06,660 INFO [RS:2;579b0f681375:46719 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:43:06,660 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;579b0f681375:42367 2024-12-01T23:43:06,660 INFO [RS:2;579b0f681375:46719 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:43:06,661 DEBUG [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T23:43:06,661 INFO [RS:0;579b0f681375:42367 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:43:06,661 INFO [RS:0;579b0f681375:42367 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:43:06,661 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-01T23:43:06,661 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,46325,1733096584597 with port=42187, startcode=1733096585405 2024-12-01T23:43:06,661 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T23:43:06,662 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,46325,1733096584597 with port=46719, startcode=1733096585473 2024-12-01T23:43:06,662 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T23:43:06,663 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,46325,1733096584597 with port=42367, startcode=1733096585284 2024-12-01T23:43:06,663 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:06,664 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T23:43:06,666 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T23:43:06,666 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T23:43:06,666 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T23:43:06,666 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T23:43:06,667 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:06,669 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,669 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T23:43:06,670 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T23:43:06,672 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T23:43:06,672 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T23:43:06,674 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T23:43:06,674 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T23:43:06,675 DEBUG [RS:1;579b0f681375:42187 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:43:06,675 DEBUG [RS:0;579b0f681375:42367 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:43:06,675 DEBUG [RS:2;579b0f681375:46719 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:43:06,679 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733096586675,5,FailOnTimeoutGroup] 2024-12-01T23:43:06,679 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733096586679,5,FailOnTimeoutGroup] 2024-12-01T23:43:06,679 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,680 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T23:43:06,681 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,681 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,681 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:06,682 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:06,687 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:49166 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49166 dst: /127.0.0.1:46667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:06,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-01T23:43:06,699 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-12-01T23:43:06,701 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T23:43:06,702 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d 2024-12-01T23:43:06,713 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:06,713 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56841, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:43:06,713 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59637, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:43:06,713 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48765, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:43:06,714 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
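The FSTableDescriptors/HRegion entries above spell out the hbase:meta descriptor (families info, ns, rep_barrier and table, each with ROWCOL bloom filters, ROW_INDEX_V1 encoding and in-memory caching). A minimal sketch of building a descriptor with the same 'info'-style family through the public client API follows; the table name 'demo:meta_like' is made up for illustration and nothing here is the code that produced the log lines:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      public static void main(String[] args) {
        // Family settings echo the 'info' family logged above:
        // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, ROW_INDEX_V1, 8 KB blocks.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            .build();
        System.out.println(td);
      }
    }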
2024-12-01T23:43:06,720 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:49180 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49180 dst: /127.0.0.1:46667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:06,720 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46325 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,42367,1733096585284 2024-12-01T23:43:06,722 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46325 {}] master.ServerManager(517): Registering regionserver=579b0f681375,42367,1733096585284 2024-12-01T23:43:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-01T23:43:06,728 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T23:43:06,730 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:06,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T23:43:06,733 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46325 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,46719,1733096585473 2024-12-01T23:43:06,733 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46325 {}] master.ServerManager(517): Registering regionserver=579b0f681375,46719,1733096585473 2024-12-01T23:43:06,736 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T23:43:06,736 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:06,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T23:43:06,740 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d 2024-12-01T23:43:06,740 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40603 2024-12-01T23:43:06,740 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:43:06,741 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46325 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,42187,1733096585405 2024-12-01T23:43:06,741 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46325 {}] master.ServerManager(517): Registering regionserver=579b0f681375,42187,1733096585405 2024-12-01T23:43:06,741 DEBUG [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d 2024-12-01T23:43:06,741 DEBUG [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40603 2024-12-01T23:43:06,741 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T23:43:06,741 DEBUG [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:43:06,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:06,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T23:43:06,745 DEBUG [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d 2024-12-01T23:43:06,745 DEBUG [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40603 2024-12-01T23:43:06,746 DEBUG [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:43:06,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T23:43:06,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:06,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T23:43:06,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T23:43:06,751 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:06,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:06,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T23:43:06,754 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740 2024-12-01T23:43:06,755 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740 2024-12-01T23:43:06,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T23:43:06,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T23:43:06,759 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
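The FlushLargeStoresPolicy(65) line above falls back to memStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor. A hedged sketch of setting that attribute on an ordinary table descriptor follows; the table 'demo:flush_tuned' and the 16 MB value are illustrative, not a recommendation:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
      public static void main(String[] args) {
        // The property name comes straight from the log line above; here it is stored
        // as a table-level attribute rather than left to the per-family default.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "flush_tuned"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }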
2024-12-01T23:43:06,762 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T23:43:06,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:06,771 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:43:06,772 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64450076, jitterRate=-0.03961902856826782}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:43:06,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733096586730Initializing all the Stores at 1733096586732 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096586732Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096586733 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096586733Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096586733Cleaning up temporary data from old regions at 1733096586758 (+25 ms)Region opened successfully at 1733096586775 (+17 ms) 2024-12-01T23:43:06,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T23:43:06,776 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T23:43:06,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T23:43:06,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T23:43:06,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T23:43:06,778 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
2024-12-01T23:43:06,778 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733096586776Disabling compacts and flushes for region at 1733096586776Disabling writes for close at 1733096586776Writing region close event to WAL at 1733096586777 (+1 ms)Closed at 1733096586778 (+1 ms) 2024-12-01T23:43:06,781 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:06,781 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T23:43:06,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T23:43:06,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T23:43:06,798 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T23:43:06,798 DEBUG [RS:0;579b0f681375:42367 {}] zookeeper.ZKUtil(111): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,42367,1733096585284 2024-12-01T23:43:06,798 WARN [RS:0;579b0f681375:42367 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T23:43:06,798 INFO [RS:0;579b0f681375:42367 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:43:06,798 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284 2024-12-01T23:43:06,799 DEBUG [RS:2;579b0f681375:46719 {}] zookeeper.ZKUtil(111): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,46719,1733096585473 2024-12-01T23:43:06,799 DEBUG [RS:1;579b0f681375:42187 {}] zookeeper.ZKUtil(111): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,42187,1733096585405 2024-12-01T23:43:06,800 WARN [RS:2;579b0f681375:46719 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T23:43:06,800 WARN [RS:1;579b0f681375:42187 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T23:43:06,800 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,46719,1733096585473] 2024-12-01T23:43:06,800 INFO [RS:2;579b0f681375:46719 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:43:06,800 INFO [RS:1;579b0f681375:42187 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:43:06,800 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,42367,1733096585284] 2024-12-01T23:43:06,800 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,42187,1733096585405] 2024-12-01T23:43:06,800 DEBUG [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,46719,1733096585473 2024-12-01T23:43:06,800 DEBUG [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42187,1733096585405 2024-12-01T23:43:06,824 INFO [RS:0;579b0f681375:42367 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:43:06,825 INFO [RS:1;579b0f681375:42187 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:43:06,824 INFO [RS:2;579b0f681375:46719 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:43:06,838 INFO [RS:1;579b0f681375:42187 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:43:06,838 INFO [RS:0;579b0f681375:42367 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:43:06,838 INFO [RS:2;579b0f681375:46719 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:43:06,843 INFO [RS:1;579b0f681375:42187 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:43:06,843 INFO [RS:0;579b0f681375:42367 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:43:06,843 INFO [RS:2;579b0f681375:46719 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:43:06,843 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,843 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:06,843 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,845 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:43:06,845 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:43:06,845 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:43:06,851 INFO [RS:2;579b0f681375:46719 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:43:06,851 INFO [RS:1;579b0f681375:42187 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:43:06,851 INFO [RS:0;579b0f681375:42367 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:43:06,852 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,852 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,852 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG 
[RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:06,853 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,853 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:06,854 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:2;579b0f681375:46719 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:06,854 DEBUG [RS:1;579b0f681375:42187 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:06,854 DEBUG [RS:0;579b0f681375:42367 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:06,858 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,858 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,858 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:06,858 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46719,1733096585473-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,42367,1733096585284-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:06,859 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,42187,1733096585405-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:06,877 INFO [RS:2;579b0f681375:46719 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:43:06,877 INFO [RS:0;579b0f681375:42367 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:43:06,877 INFO [RS:1;579b0f681375:42187 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:43:06,879 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46719,1733096585473-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,879 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,42187,1733096585405-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
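The ChoreService(168) entries above show chores such as CompactionChecker and MemstoreFlusherChore being scheduled with fixed periods. A minimal, self-contained sketch of scheduling a custom chore the same way follows; the chore name, the anonymous Stoppable and the 1000 ms period are illustrative only:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        // Trivial Stoppable so the chore has a lifecycle owner (illustrative only).
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // A chore that fires every 1000 ms, mirroring the period logged for
        // CompactionChecker and MemstoreFlusherChore above.
        ScheduledChore heartbeat = new ScheduledChore("exampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(heartbeat);
        Thread.sleep(5_000);
        service.shutdown();
      }
    }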
2024-12-01T23:43:06,879 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,42367,1733096585284-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,879 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,879 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,879 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,880 INFO [RS:2;579b0f681375:46719 {}] regionserver.Replication(171): 579b0f681375,46719,1733096585473 started 2024-12-01T23:43:06,880 INFO [RS:1;579b0f681375:42187 {}] regionserver.Replication(171): 579b0f681375,42187,1733096585405 started 2024-12-01T23:43:06,880 INFO [RS:0;579b0f681375:42367 {}] regionserver.Replication(171): 579b0f681375,42367,1733096585284 started 2024-12-01T23:43:06,896 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,896 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,46719,1733096585473, RpcServer on 579b0f681375/172.17.0.2:46719, sessionid=0x1019490c5560003 2024-12-01T23:43:06,896 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:06,896 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,42187,1733096585405, RpcServer on 579b0f681375/172.17.0.2:42187, sessionid=0x1019490c5560002 2024-12-01T23:43:06,897 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:43:06,897 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:43:06,897 DEBUG [RS:2;579b0f681375:46719 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,46719,1733096585473 2024-12-01T23:43:06,897 DEBUG [RS:1;579b0f681375:42187 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,42187,1733096585405 2024-12-01T23:43:06,897 DEBUG [RS:2;579b0f681375:46719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,46719,1733096585473' 2024-12-01T23:43:06,897 DEBUG [RS:1;579b0f681375:42187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,42187,1733096585405' 2024-12-01T23:43:06,897 DEBUG [RS:2;579b0f681375:46719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:43:06,897 DEBUG [RS:1;579b0f681375:42187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:43:06,898 DEBUG [RS:2;579b0f681375:46719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:43:06,898 DEBUG [RS:1;579b0f681375:42187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:43:06,899 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:43:06,899 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:43:06,899 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:43:06,899 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:43:06,899 DEBUG [RS:1;579b0f681375:42187 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,42187,1733096585405 2024-12-01T23:43:06,899 DEBUG [RS:2;579b0f681375:46719 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,46719,1733096585473 2024-12-01T23:43:06,899 DEBUG [RS:1;579b0f681375:42187 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,42187,1733096585405' 2024-12-01T23:43:06,899 DEBUG [RS:2;579b0f681375:46719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,46719,1733096585473' 2024-12-01T23:43:06,899 DEBUG [RS:1;579b0f681375:42187 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:43:06,899 DEBUG [RS:2;579b0f681375:46719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:43:06,900 DEBUG [RS:2;579b0f681375:46719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:43:06,900 DEBUG [RS:1;579b0f681375:42187 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:43:06,900 DEBUG [RS:1;579b0f681375:42187 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:43:06,900 DEBUG [RS:2;579b0f681375:46719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:43:06,900 INFO [RS:2;579b0f681375:46719 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T23:43:06,900 INFO [RS:1;579b0f681375:42187 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T23:43:06,900 INFO [RS:1;579b0f681375:42187 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T23:43:06,900 INFO [RS:2;579b0f681375:46719 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T23:43:06,902 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:06,903 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,42367,1733096585284, RpcServer on 579b0f681375/172.17.0.2:42367, sessionid=0x1019490c5560001 2024-12-01T23:43:06,903 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:43:06,903 DEBUG [RS:0;579b0f681375:42367 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,42367,1733096585284 2024-12-01T23:43:06,903 DEBUG [RS:0;579b0f681375:42367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,42367,1733096585284' 2024-12-01T23:43:06,903 DEBUG [RS:0;579b0f681375:42367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:43:06,904 DEBUG [RS:0;579b0f681375:42367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:43:06,905 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:43:06,905 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:43:06,905 DEBUG [RS:0;579b0f681375:42367 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,42367,1733096585284 2024-12-01T23:43:06,905 DEBUG [RS:0;579b0f681375:42367 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,42367,1733096585284' 2024-12-01T23:43:06,905 DEBUG [RS:0;579b0f681375:42367 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:43:06,906 DEBUG [RS:0;579b0f681375:42367 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:43:06,907 DEBUG [RS:0;579b0f681375:42367 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:43:06,907 INFO [RS:0;579b0f681375:42367 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T23:43:06,907 INFO [RS:0;579b0f681375:42367 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T23:43:06,949 WARN [579b0f681375:46325 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-01T23:43:07,004 INFO [RS:1;579b0f681375:42187 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:43:07,004 INFO [RS:2;579b0f681375:46719 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:43:07,007 INFO [RS:1;579b0f681375:42187 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C42187%2C1733096585405, suffix=, logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42187,1733096585405, archiveDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs, maxLogs=32 2024-12-01T23:43:07,007 INFO [RS:2;579b0f681375:46719 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C46719%2C1733096585473, suffix=, logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,46719,1733096585473, archiveDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs, maxLogs=32 2024-12-01T23:43:07,008 INFO [RS:0;579b0f681375:42367 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:43:07,012 INFO [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C42367%2C1733096585284, suffix=, logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284, archiveDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs, maxLogs=32 2024-12-01T23:43:07,024 DEBUG [RS:1;579b0f681375:42187 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42187,1733096585405/579b0f681375%2C42187%2C1733096585405.1733096587011, exclude list is [], retry=0 2024-12-01T23:43:07,026 DEBUG [RS:2;579b0f681375:46719 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,46719,1733096585473/579b0f681375%2C46719%2C1733096585473.1733096587011, exclude list is [], retry=0 2024-12-01T23:43:07,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46667,DS-1b696df6-8425-448a-a2a3-174bd4f52d90,DISK] 2024-12-01T23:43:07,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35045,DS-9f48e73b-a700-4e5f-b82f-7578e41db98a,DISK] 2024-12-01T23:43:07,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38241,DS-953f7562-4df4-400e-bdcd-239865113ecf,DISK] 2024-12-01T23:43:07,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:38241,DS-953f7562-4df4-400e-bdcd-239865113ecf,DISK] 2024-12-01T23:43:07,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46667,DS-1b696df6-8425-448a-a2a3-174bd4f52d90,DISK] 2024-12-01T23:43:07,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35045,DS-9f48e73b-a700-4e5f-b82f-7578e41db98a,DISK] 2024-12-01T23:43:07,062 DEBUG [RS:0;579b0f681375:42367 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284/579b0f681375%2C42367%2C1733096585284.1733096587014, exclude list is [], retry=0 2024-12-01T23:43:07,062 INFO [RS:1;579b0f681375:42187 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42187,1733096585405/579b0f681375%2C42187%2C1733096585405.1733096587011 2024-12-01T23:43:07,063 DEBUG [RS:1;579b0f681375:42187 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43795:43795),(127.0.0.1/127.0.0.1:40467:40467),(127.0.0.1/127.0.0.1:36853:36853)] 2024-12-01T23:43:07,064 INFO [RS:2;579b0f681375:46719 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,46719,1733096585473/579b0f681375%2C46719%2C1733096585473.1733096587011 2024-12-01T23:43:07,065 DEBUG [RS:2;579b0f681375:46719 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36853:36853),(127.0.0.1/127.0.0.1:43795:43795),(127.0.0.1/127.0.0.1:40467:40467)] 2024-12-01T23:43:07,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35045,DS-9f48e73b-a700-4e5f-b82f-7578e41db98a,DISK] 2024-12-01T23:43:07,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38241,DS-953f7562-4df4-400e-bdcd-239865113ecf,DISK] 2024-12-01T23:43:07,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46667,DS-1b696df6-8425-448a-a2a3-174bd4f52d90,DISK] 2024-12-01T23:43:07,072 INFO [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284/579b0f681375%2C42367%2C1733096585284.1733096587014 2024-12-01T23:43:07,073 DEBUG [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40467:40467),(127.0.0.1/127.0.0.1:36853:36853),(127.0.0.1/127.0.0.1:43795:43795)] 2024-12-01T23:43:07,202 DEBUG [579b0f681375:46325 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T23:43:07,210 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T23:43:07,219 INFO [579b0f681375:46325 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:43:07,219 INFO [579b0f681375:46325 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:43:07,219 INFO [579b0f681375:46325 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T23:43:07,219 DEBUG [579b0f681375:46325 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:43:07,226 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=579b0f681375,42367,1733096585284 2024-12-01T23:43:07,232 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 579b0f681375,42367,1733096585284, state=OPENING 2024-12-01T23:43:07,272 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T23:43:07,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:07,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:07,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:07,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:07,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,289 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T23:43:07,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=579b0f681375,42367,1733096585284}] 2024-12-01T23:43:07,469 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T23:43:07,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39029, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T23:43:07,483 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T23:43:07,484 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:43:07,484 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-01T23:43:07,488 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C42367%2C1733096585284.meta, suffix=.meta, logDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284, archiveDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs, maxLogs=32 2024-12-01T23:43:07,502 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284/579b0f681375%2C42367%2C1733096585284.meta.1733096587489.meta, exclude list is [], retry=0 2024-12-01T23:43:07,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46667,DS-1b696df6-8425-448a-a2a3-174bd4f52d90,DISK] 2024-12-01T23:43:07,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35045,DS-9f48e73b-a700-4e5f-b82f-7578e41db98a,DISK] 2024-12-01T23:43:07,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38241,DS-953f7562-4df4-400e-bdcd-239865113ecf,DISK] 2024-12-01T23:43:07,511 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/WALs/579b0f681375,42367,1733096585284/579b0f681375%2C42367%2C1733096585284.meta.1733096587489.meta 2024-12-01T23:43:07,512 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43795:43795),(127.0.0.1/127.0.0.1:40467:40467),(127.0.0.1/127.0.0.1:36853:36853)] 2024-12-01T23:43:07,512 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:43:07,514 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T23:43:07,517 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T23:43:07,521 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T23:43:07,525 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T23:43:07,526 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:07,526 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T23:43:07,526 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T23:43:07,529 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T23:43:07,531 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T23:43:07,531 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:07,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:07,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T23:43:07,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T23:43:07,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:07,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:07,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T23:43:07,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T23:43:07,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:07,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:07,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T23:43:07,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T23:43:07,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:07,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:07,541 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T23:43:07,543 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740 2024-12-01T23:43:07,545 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740 2024-12-01T23:43:07,547 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T23:43:07,547 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T23:43:07,548 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T23:43:07,550 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T23:43:07,551 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59645489, jitterRate=-0.11121295392513275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:43:07,551 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T23:43:07,553 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733096587526Writing region info on filesystem at 1733096587526Initializing all the Stores at 1733096587528 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096587529 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096587529Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096587529Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096587529Cleaning up temporary data from old regions at 1733096587547 (+18 ms)Running coprocessor post-open hooks at 1733096587551 (+4 ms)Region opened successfully at 1733096587553 (+2 ms) 2024-12-01T23:43:07,559 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733096587461 2024-12-01T23:43:07,569 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T23:43:07,570 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T23:43:07,571 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=579b0f681375,42367,1733096585284 2024-12-01T23:43:07,573 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 579b0f681375,42367,1733096585284, state=OPEN 2024-12-01T23:43:07,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:07,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:07,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:07,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:07,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,605 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:07,606 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=579b0f681375,42367,1733096585284 2024-12-01T23:43:07,615 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T23:43:07,615 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=579b0f681375,42367,1733096585284 in 315 msec 2024-12-01T23:43:07,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T23:43:07,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 831 msec 2024-12-01T23:43:07,624 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:07,625 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T23:43:07,642 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:43:07,643 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=579b0f681375,42367,1733096585284, seqNum=-1] 2024-12-01T23:43:07,683 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:43:07,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38181, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:43:07,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1210 sec 2024-12-01T23:43:07,711 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733096587711, completionTime=-1 2024-12-01T23:43:07,713 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T23:43:07,713 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-01T23:43:07,736 INFO [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T23:43:07,736 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733096647736 2024-12-01T23:43:07,737 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733096707737 2024-12-01T23:43:07,737 INFO [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 23 msec 2024-12-01T23:43:07,738 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:43:07,745 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46325,1733096584597-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:07,745 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46325,1733096584597-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:07,745 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46325,1733096584597-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:07,747 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-579b0f681375:46325, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:07,747 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:07,748 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:07,754 DEBUG [master/579b0f681375:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T23:43:07,775 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.210sec 2024-12-01T23:43:07,777 INFO [master/579b0f681375:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T23:43:07,778 INFO [master/579b0f681375:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T23:43:07,779 INFO [master/579b0f681375:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T23:43:07,779 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T23:43:07,779 INFO [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T23:43:07,780 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46325,1733096584597-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:07,781 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46325,1733096584597-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T23:43:07,786 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T23:43:07,786 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T23:43:07,787 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,46325,1733096584597-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:07,816 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c930c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:43:07,821 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-01T23:43:07,821 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-01T23:43:07,826 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 579b0f681375,46325,-1 for getting cluster id 2024-12-01T23:43:07,828 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T23:43:07,835 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '74cac168-9f19-4415-b2ff-0996ffe9bfde' 2024-12-01T23:43:07,837 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T23:43:07,838 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "74cac168-9f19-4415-b2ff-0996ffe9bfde" 2024-12-01T23:43:07,838 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56919eb5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:43:07,838 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [579b0f681375,46325,-1] 2024-12-01T23:43:07,840 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T23:43:07,842 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:07,843 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T23:43:07,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4989f75b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:43:07,846 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:43:07,853 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=579b0f681375,42367,1733096585284, seqNum=-1] 2024-12-01T23:43:07,853 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:43:07,856 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:43:07,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=579b0f681375,46325,1733096584597 2024-12-01T23:43:07,879 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T23:43:07,883 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 579b0f681375,46325,1733096584597 2024-12-01T23:43:07,885 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3230ee24 2024-12-01T23:43:07,885 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T23:43:07,887 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47330, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T23:43:07,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:43:07,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-01T23:43:07,901 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:43:07,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-01T23:43:07,903 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:07,906 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:43:07,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:07,914 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:07,914 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-01T23:43:07,917 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:47322 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:35045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47322 dst: /127.0.0.1:35045
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T23:43:07,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775680_1021 (size=392)
2024-12-01T23:43:07,924 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-01T23:43:07,927 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 504b2a0a2f2d4ec47089037f1357cbba, NAME => 'TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d
2024-12-01T23:43:07,933 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-01T23:43:07,934 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-01T23:43:07,936 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:47338 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:35045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47338 dst: /127.0.0.1:35045
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T23:43:07,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775664_1023 (size=51)
2024-12-01T23:43:07,941 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-01T23:43:07,942 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-01T23:43:07,942 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 504b2a0a2f2d4ec47089037f1357cbba, disabling compactions & flushes
2024-12-01T23:43:07,942 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.
2024-12-01T23:43:07,942 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.
2024-12-01T23:43:07,942 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. after waiting 0 ms
2024-12-01T23:43:07,942 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.
2024-12-01T23:43:07,942 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.
2024-12-01T23:43:07,943 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 504b2a0a2f2d4ec47089037f1357cbba: Waiting for close lock at 1733096587942Disabling compacts and flushes for region at 1733096587942Disabling writes for close at 1733096587942Writing region close event to WAL at 1733096587942Closed at 1733096587942 2024-12-01T23:43:07,945 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:43:07,949 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733096587945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733096587945"}]},"ts":"1733096587945"} 2024-12-01T23:43:07,954 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-01T23:43:07,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:43:07,958 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733096587956"}]},"ts":"1733096587956"} 2024-12-01T23:43:07,962 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-01T23:43:07,963 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T23:43:07,964 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:43:07,964 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:43:07,964 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T23:43:07,964 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:43:07,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=504b2a0a2f2d4ec47089037f1357cbba, ASSIGN}] 2024-12-01T23:43:07,968 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=504b2a0a2f2d4ec47089037f1357cbba, ASSIGN 2024-12-01T23:43:07,970 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=504b2a0a2f2d4ec47089037f1357cbba, ASSIGN; state=OFFLINE, location=579b0f681375,42367,1733096585284; forceNewPlan=false, retain=false 2024-12-01T23:43:08,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:08,124 INFO [579b0f681375:46325 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T23:43:08,126 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=504b2a0a2f2d4ec47089037f1357cbba, regionState=OPENING, regionLocation=579b0f681375,42367,1733096585284 2024-12-01T23:43:08,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=504b2a0a2f2d4ec47089037f1357cbba, ASSIGN because future has completed 2024-12-01T23:43:08,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 504b2a0a2f2d4ec47089037f1357cbba, server=579b0f681375,42367,1733096585284}] 2024-12-01T23:43:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:08,295 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 
2024-12-01T23:43:08,296 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 504b2a0a2f2d4ec47089037f1357cbba, NAME => 'TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:43:08,296 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,296 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:08,296 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,296 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,299 INFO [StoreOpener-504b2a0a2f2d4ec47089037f1357cbba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,302 INFO [StoreOpener-504b2a0a2f2d4ec47089037f1357cbba-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 504b2a0a2f2d4ec47089037f1357cbba columnFamilyName cf 2024-12-01T23:43:08,302 DEBUG [StoreOpener-504b2a0a2f2d4ec47089037f1357cbba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:08,304 INFO [StoreOpener-504b2a0a2f2d4ec47089037f1357cbba-1 {}] regionserver.HStore(327): Store=504b2a0a2f2d4ec47089037f1357cbba/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:08,304 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,305 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,306 DEBUG 
[RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,307 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,307 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,310 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,316 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:43:08,317 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 504b2a0a2f2d4ec47089037f1357cbba; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59350092, jitterRate=-0.11561471223831177}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:43:08,317 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,318 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 504b2a0a2f2d4ec47089037f1357cbba: Running coprocessor pre-open hook at 1733096588297Writing region info on filesystem at 1733096588297Initializing all the Stores at 1733096588299 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096588299Cleaning up temporary data from old regions at 1733096588307 (+8 ms)Running coprocessor post-open hooks at 1733096588317 (+10 ms)Region opened successfully at 1733096588318 (+1 ms) 2024-12-01T23:43:08,320 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba., pid=6, masterSystemTime=1733096588288 2024-12-01T23:43:08,324 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,324 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 
2024-12-01T23:43:08,326 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=504b2a0a2f2d4ec47089037f1357cbba, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,42367,1733096585284 2024-12-01T23:43:08,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 504b2a0a2f2d4ec47089037f1357cbba, server=579b0f681375,42367,1733096585284 because future has completed 2024-12-01T23:43:08,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T23:43:08,335 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 504b2a0a2f2d4ec47089037f1357cbba, server=579b0f681375,42367,1733096585284 in 199 msec 2024-12-01T23:43:08,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T23:43:08,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=504b2a0a2f2d4ec47089037f1357cbba, ASSIGN in 370 msec 2024-12-01T23:43:08,341 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:43:08,341 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733096588341"}]},"ts":"1733096588341"} 2024-12-01T23:43:08,344 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-01T23:43:08,346 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:43:08,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 451 msec 2024-12-01T23:43:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:08,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-01T23:43:08,543 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T23:43:08,546 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:43:08,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-01T23:43:08,552 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:43:08,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
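The entries above complete the CreateTableProcedure lifecycle for TestHBaseWalOnEC (ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION, finishing pid=4 in 451 msec) and the test's wait for region assignment. For readers reproducing this step outside the test, a minimal client-side sketch of creating an equivalent single-family table is shown below. The family name and the VERSIONS => '1' / BLOOMFILTER => 'NONE' attributes are taken from the open journal logged earlier; the connection/configuration setup is an assumption, not code from TestHBaseWalOnEC itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                  // VERSIONS => '1' in the open journal above
              .setBloomFilterType(BloomType.NONE) // BLOOMFILTER => 'NONE'
              .build())
          .build();
      admin.createTable(td); // blocks until the CreateTableProcedure (pid=4 above) finishes
    }
  }
}
```

The blocking Admin API is used here only for brevity; the log itself went through RawAsyncHBaseAdmin, whose createTable call returns a CompletableFuture that completes when the same master-side procedure does.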
2024-12-01T23:43:08,560 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba., hostname=579b0f681375,42367,1733096585284, seqNum=2] 2024-12-01T23:43:08,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-01T23:43:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-01T23:43:08,577 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-01T23:43:08,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:43:08,578 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T23:43:08,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T23:43:08,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:43:08,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42367 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-01T23:43:08,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,753 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 504b2a0a2f2d4ec47089037f1357cbba 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-01T23:43:08,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/.tmp/cf/641ea180cacc48f6b304466d54341bc3 is 36, key is row/cf:cq/1733096588563/Put/seqid=0 2024-12-01T23:43:08,806 WARN [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-01T23:43:08,806 WARN [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:08,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-149051909_22 at /127.0.0.1:33090 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33090 dst: /127.0.0.1:38241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:08,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-01T23:43:08,815 WARN [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
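The parity-allocation warnings and the DataXceiver "Premature EOF" errors in this stretch share one cause: the store file is written with the RS-3-2-1024k striped policy, whose block groups need 3 data plus 2 parity locations, while the mini cluster runs only 3 datanodes, so the 2 parity blocks cannot be placed and the writer flags the group as at risk. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, the same check can be done programmatically. The sketch below is illustrative only: the NameNode URI and path are taken from this log, and it relies on the public DistributedFileSystem API as I understand it (getErasureCodingPolicy returning null for a non-EC path is an assumption worth verifying against your Hadoop version).

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcCapacity {
  public static void main(String[] args) throws Exception {
    // NameNode address and test-data path taken from the store paths in this log; adjust as needed.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40603"), new Configuration())) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dataDir = new Path("/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data");
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dataDir); // null => plain replication
      int liveDataNodes = dfs.getDataNodeStats().length;
      if (policy != null) {
        int needed = policy.getNumDataUnits() + policy.getNumParityUnits(); // 3 + 2 = 5 for RS-3-2-1024k
        System.out.printf("policy=%s needs %d datanodes, cluster has %d%n",
            policy.getName(), needed, liveDataNodes);
        if (liveDataNodes < needed) {
          System.out.println("Full block groups cannot be placed; expect the warnings seen above.");
        }
      }
    }
  }
}
```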
2024-12-01T23:43:08,815 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/.tmp/cf/641ea180cacc48f6b304466d54341bc3 2024-12-01T23:43:08,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/.tmp/cf/641ea180cacc48f6b304466d54341bc3 as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/cf/641ea180cacc48f6b304466d54341bc3 2024-12-01T23:43:08,866 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/cf/641ea180cacc48f6b304466d54341bc3, entries=1, sequenceid=5, filesize=4.7 K 2024-12-01T23:43:08,872 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 504b2a0a2f2d4ec47089037f1357cbba in 119ms, sequenceid=5, compaction requested=false 2024-12-01T23:43:08,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-01T23:43:08,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 504b2a0a2f2d4ec47089037f1357cbba: 2024-12-01T23:43:08,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 
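The flush above persisted a single cell (row 'row', column cf:cq, 32 B of data) into 641ea180cacc48f6b304466d54341bc3 at sequenceid=5. The client side of that exchange is a plain Put followed by an admin-triggered flush; here is a minimal sketch, assuming an open Connection like the one in the earlier example and an arbitrary value (the log records only the cell's coordinates and size, not its contents):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutAndFlush {
  static void putAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      // Write the single cell seen in the flush above: row 'row', column cf:cq.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Force the memstore out to an HFile, mirroring the FlushTableProcedure (pid=7) above.
      admin.flush(name);
      // Read the cell back to confirm it survived the flush.
      Result r = table.get(new Get(Bytes.toBytes("row")));
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"))));
    }
  }
}
```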
2024-12-01T23:43:08,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-01T23:43:08,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-01T23:43:08,884 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T23:43:08,884 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 301 msec 2024-12-01T23:43:08,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 313 msec 2024-12-01T23:43:08,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46325 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:43:08,890 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T23:43:08,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T23:43:08,903 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T23:43:08,904 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:08,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,908 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T23:43:08,908 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T23:43:08,908 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1575943754, stopped=false 2024-12-01T23:43:08,908 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=579b0f681375,46325,1733096584597 2024-12-01T23:43:08,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:08,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:08,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:08,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:08,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:08,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, 
quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:08,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:08,964 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T23:43:08,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:08,965 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:08,965 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T23:43:08,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:08,966 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:08,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:08,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:08,966 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,967 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,42367,1733096585284' ***** 2024-12-01T23:43:08,967 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:43:08,967 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,42187,1733096585405' ***** 2024-12-01T23:43:08,967 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:43:08,967 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,46719,1733096585473' ***** 2024-12-01T23:43:08,967 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:43:08,967 INFO [RS:0;579b0f681375:42367 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:43:08,967 INFO [RS:1;579b0f681375:42187 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:43:08,968 INFO [RS:1;579b0f681375:42187 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
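Both long call stacks above bottom out in TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101): the JUnit teardown hook closes the shared async connection and then asks HBaseTestingUtil to shut down the mini cluster, which is what produces the three "***** STOPPING region server ... *****" banners. A minimal sketch of that teardown shape follows; the field name and the exact body of the real tearDown are assumptions, only the utility calls visible in the stacks are taken from this log.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TestHBaseWalOnECShape {
  // Assumed field; the real test presumably keeps a utility instance created in its setup.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the master and the three region servers, then the mini DFS/ZK underneath,
    // producing the shutdown sequence logged above.
    UTIL.shutdownMiniCluster();
  }
}
```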
2024-12-01T23:43:08,968 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:43:08,968 INFO [RS:0;579b0f681375:42367 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T23:43:08,968 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:43:08,968 INFO [RS:2;579b0f681375:46719 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:43:08,968 INFO [RS:2;579b0f681375:46719 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T23:43:08,968 INFO [RS:1;579b0f681375:42187 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T23:43:08,968 INFO [RS:0;579b0f681375:42367 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T23:43:08,968 INFO [RS:2;579b0f681375:46719 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T23:43:08,968 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:43:08,968 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,42187,1733096585405 2024-12-01T23:43:08,968 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,46719,1733096585473 2024-12-01T23:43:08,968 INFO [RS:1;579b0f681375:42187 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:08,968 INFO [RS:2;579b0f681375:46719 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:08,968 INFO [RS:1;579b0f681375:42187 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;579b0f681375:42187. 2024-12-01T23:43:08,968 INFO [RS:2;579b0f681375:46719 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;579b0f681375:46719. 
2024-12-01T23:43:08,968 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(3091): Received CLOSE for 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,968 DEBUG [RS:1;579b0f681375:42187 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:08,969 DEBUG [RS:2;579b0f681375:46719 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:08,969 DEBUG [RS:1;579b0f681375:42187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,969 DEBUG [RS:2;579b0f681375:46719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,969 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,42187,1733096585405; all regions closed. 2024-12-01T23:43:08,969 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,46719,1733096585473; all regions closed. 
2024-12-01T23:43:08,969 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,42367,1733096585284 2024-12-01T23:43:08,969 INFO [RS:0;579b0f681375:42367 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:08,969 INFO [RS:0;579b0f681375:42367 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;579b0f681375:42367. 2024-12-01T23:43:08,970 DEBUG [RS:0;579b0f681375:42367 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:08,970 DEBUG [RS:0;579b0f681375:42367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,970 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 504b2a0a2f2d4ec47089037f1357cbba, disabling compactions & flushes 2024-12-01T23:43:08,970 INFO [RS:0;579b0f681375:42367 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:43:08,970 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,970 INFO [RS:0;579b0f681375:42367 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:43:08,970 INFO [RS:0;579b0f681375:42367 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T23:43:08,970 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,970 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 
after waiting 0 ms 2024-12-01T23:43:08,970 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T23:43:08,970 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,971 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T23:43:08,971 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T23:43:08,971 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-01T23:43:08,971 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T23:43:08,971 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1325): Online Regions={504b2a0a2f2d4ec47089037f1357cbba=TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba., 1588230740=hbase:meta,,1.1588230740} 2024-12-01T23:43:08,971 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T23:43:08,971 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T23:43:08,971 DEBUG [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 504b2a0a2f2d4ec47089037f1357cbba 2024-12-01T23:43:08,972 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-01T23:43:08,975 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:08,975 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:08,975 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:08,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_1073741827_1017 (size=93) 2024-12-01T23:43:08,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741827_1017 (size=93) 2024-12-01T23:43:08,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_1073741827_1017 (size=93) 2024-12-01T23:43:08,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741826_1016 (size=93) 2024-12-01T23:43:08,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_1073741826_1016 (size=93) 2024-12-01T23:43:08,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_1073741826_1016 (size=93) 2024-12-01T23:43:08,986 DEBUG [RS:2;579b0f681375:46719 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to 
/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs 2024-12-01T23:43:08,986 DEBUG [RS:1;579b0f681375:42187 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs 2024-12-01T23:43:08,986 INFO [RS:2;579b0f681375:46719 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C46719%2C1733096585473:(num 1733096587011) 2024-12-01T23:43:08,986 INFO [RS:1;579b0f681375:42187 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C42187%2C1733096585405:(num 1733096587011) 2024-12-01T23:43:08,986 DEBUG [RS:2;579b0f681375:46719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:08,986 DEBUG [RS:1;579b0f681375:42187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:43:08,987 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T23:43:08,987 INFO [RS:2;579b0f681375:46719 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:08,987 INFO [RS:1;579b0f681375:42187 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:08,987 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
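RS:1 and RS:2 hosted no regions, so their shutdown reduces to archiving one WAL each into .../oldWALs and closing the AsyncFSWAL. To confirm the archive step independently of the log, the moved files can be listed with the ordinary FileSystem API; a small sketch, assuming the NameNode and test-data path from this run:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWals {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40603"), new Configuration())) {
      Path oldWals = new Path("/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs");
      for (FileStatus status : fs.listStatus(oldWals)) {
        // Archived WALs carry the server name prefix seen above,
        // e.g. 579b0f681375%2C46719%2C1733096585473.<timestamp>.
        System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
      }
    }
  }
}
```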
2024-12-01T23:43:08,988 INFO [RS:2;579b0f681375:46719 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46719 2024-12-01T23:43:08,988 INFO [RS:1;579b0f681375:42187 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42187 2024-12-01T23:43:08,994 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/default/TestHBaseWalOnEC/504b2a0a2f2d4ec47089037f1357cbba/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T23:43:08,996 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,996 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 504b2a0a2f2d4ec47089037f1357cbba: Waiting for close lock at 1733096588969Running coprocessor pre-close hooks at 1733096588970 (+1 ms)Disabling compacts and flushes for region at 1733096588970Disabling writes for close at 1733096588970Writing region close event to WAL at 1733096588975 (+5 ms)Running coprocessor post-close hooks at 1733096588995 (+20 ms)Closed at 1733096588996 (+1 ms) 2024-12-01T23:43:08,997 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba. 2024-12-01T23:43:08,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:08,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,42187,1733096585405 2024-12-01T23:43:08,998 INFO [RS:1;579b0f681375:42187 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:08,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,46719,1733096585473 2024-12-01T23:43:08,999 INFO [RS:2;579b0f681375:46719 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:08,999 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,46719,1733096585473] 2024-12-01T23:43:09,002 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/info/e9d95f1eb92f4e5fb1ed755b76347b1e is 153, key is TestHBaseWalOnEC,,1733096587889.504b2a0a2f2d4ec47089037f1357cbba./info:regioninfo/1733096588325/Put/seqid=0 2024-12-01T23:43:09,005 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,005 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,009 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-149051909_22 at /127.0.0.1:49232 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49232 dst: /127.0.0.1:46667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-01T23:43:09,015 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,46719,1733096585473 already deleted, retry=false 2024-12-01T23:43:09,015 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
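The same parity-allocation failure recurs here while the meta region flushes its 'info' store on close, because store directories under the test-data root inherit its erasure coding policy. The log does not show how that policy was applied; the usual way to reproduce such a setup is to enable RS-3-2-1024k and set it on the target directory before any files are written. A hedged sketch using the DistributedFileSystem API (the path is this run's test-data root; everything else is an assumption about the setup, not code from the test):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ApplyEcPolicy {
  public static void main(String[] args) throws Exception {
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40603"), new Configuration())) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path root = new Path("/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d");
      // RS-3-2-1024k ships with Hadoop but is typically not enabled by default; enable it first.
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");
      // New files under this directory are then written as striped block groups (3 data + 2 parity).
      dfs.setErasureCodingPolicy(root, "RS-3-2-1024k");
    }
  }
}
```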
2024-12-01T23:43:09,015 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,46719,1733096585473 expired; onlineServers=2 2024-12-01T23:43:09,015 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,42187,1733096585405] 2024-12-01T23:43:09,015 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/info/e9d95f1eb92f4e5fb1ed755b76347b1e 2024-12-01T23:43:09,023 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,42187,1733096585405 already deleted, retry=false 2024-12-01T23:43:09,023 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,42187,1733096585405 expired; onlineServers=1 2024-12-01T23:43:09,042 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/ns/0225af3d9f2941c38ff6a5bc1d908978 is 43, key is default/ns:d/1733096587691/Put/seqid=0 2024-12-01T23:43:09,044 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,044 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-149051909_22 at /127.0.0.1:49240 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49240 dst: /127.0.0.1:46667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:09,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-01T23:43:09,053 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:09,053 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/ns/0225af3d9f2941c38ff6a5bc1d908978 2024-12-01T23:43:09,077 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/table/c7da3722ccd743a5879782aa5bc9ce4d is 52, key is TestHBaseWalOnEC/table:state/1733096588341/Put/seqid=0 2024-12-01T23:43:09,080 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,080 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,083 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-149051909_22 at /127.0.0.1:49256 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:46667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49256 dst: /127.0.0.1:46667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:09,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-01T23:43:09,087 WARN [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:09,088 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/table/c7da3722ccd743a5879782aa5bc9ce4d 2024-12-01T23:43:09,098 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/info/e9d95f1eb92f4e5fb1ed755b76347b1e as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/info/e9d95f1eb92f4e5fb1ed755b76347b1e 2024-12-01T23:43:09,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46719-0x1019490c5560003, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42187-0x1019490c5560002, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,107 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/info/e9d95f1eb92f4e5fb1ed755b76347b1e, entries=10, sequenceid=11, filesize=6.5 K 2024-12-01T23:43:09,107 INFO [RS:1;579b0f681375:42187 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:09,107 INFO [RS:2;579b0f681375:46719 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:09,108 INFO [RS:1;579b0f681375:42187 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,42187,1733096585405; zookeeper connection closed. 
2024-12-01T23:43:09,108 INFO [RS:2;579b0f681375:46719 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,46719,1733096585473; zookeeper connection closed. 2024-12-01T23:43:09,108 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@666c3e47 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@666c3e47 2024-12-01T23:43:09,108 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@ab5d933 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@ab5d933 2024-12-01T23:43:09,109 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/ns/0225af3d9f2941c38ff6a5bc1d908978 as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/ns/0225af3d9f2941c38ff6a5bc1d908978 2024-12-01T23:43:09,120 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/ns/0225af3d9f2941c38ff6a5bc1d908978, entries=2, sequenceid=11, filesize=5.0 K 2024-12-01T23:43:09,122 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/.tmp/table/c7da3722ccd743a5879782aa5bc9ce4d as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/table/c7da3722ccd743a5879782aa5bc9ce4d 2024-12-01T23:43:09,132 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/table/c7da3722ccd743a5879782aa5bc9ce4d, entries=2, sequenceid=11, filesize=5.1 K 2024-12-01T23:43:09,134 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false 2024-12-01T23:43:09,134 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T23:43:09,143 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-01T23:43:09,144 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T23:43:09,144 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T23:43:09,144 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 
1588230740: Waiting for close lock at 1733096588971Running coprocessor pre-close hooks at 1733096588971Disabling compacts and flushes for region at 1733096588971Disabling writes for close at 1733096588971Obtaining lock to block concurrent updates at 1733096588972 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733096588972Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733096588972Flushing stores of hbase:meta,,1.1588230740 at 1733096588973 (+1 ms)Flushing 1588230740/info: creating writer at 1733096588973Flushing 1588230740/info: appending metadata at 1733096588999 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733096589000 (+1 ms)Flushing 1588230740/ns: creating writer at 1733096589026 (+26 ms)Flushing 1588230740/ns: appending metadata at 1733096589041 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733096589041Flushing 1588230740/table: creating writer at 1733096589062 (+21 ms)Flushing 1588230740/table: appending metadata at 1733096589076 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733096589077 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78f6a4ab: reopening flushed file at 1733096589096 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34744187: reopening flushed file at 1733096589108 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40be7c0: reopening flushed file at 1733096589120 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false at 1733096589134 (+14 ms)Writing region close event to WAL at 1733096589136 (+2 ms)Running coprocessor post-close hooks at 1733096589144 (+8 ms)Closed at 1733096589144 2024-12-01T23:43:09,145 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T23:43:09,172 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,42367,1733096585284; all regions closed. 
2024-12-01T23:43:09,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_1073741829_1019 (size=2751) 2024-12-01T23:43:09,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_1073741829_1019 (size=2751) 2024-12-01T23:43:09,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741829_1019 (size=2751) 2024-12-01T23:43:09,180 DEBUG [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs 2024-12-01T23:43:09,180 INFO [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C42367%2C1733096585284.meta:.meta(num 1733096587489) 2024-12-01T23:43:09,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741828_1018 (size=1298) 2024-12-01T23:43:09,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_1073741828_1018 (size=1298) 2024-12-01T23:43:09,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_1073741828_1018 (size=1298) 2024-12-01T23:43:09,188 DEBUG [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/oldWALs 2024-12-01T23:43:09,188 INFO [RS:0;579b0f681375:42367 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C42367%2C1733096585284:(num 1733096587014) 2024-12-01T23:43:09,188 DEBUG [RS:0;579b0f681375:42367 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:09,188 INFO [RS:0;579b0f681375:42367 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:09,188 INFO [RS:0;579b0f681375:42367 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:09,188 INFO [RS:0;579b0f681375:42367 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:09,188 INFO [RS:0;579b0f681375:42367 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:09,189 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T23:43:09,189 INFO [RS:0;579b0f681375:42367 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42367 2024-12-01T23:43:09,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,42367,1733096585284 2024-12-01T23:43:09,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:09,215 INFO [RS:0;579b0f681375:42367 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:09,223 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,42367,1733096585284] 2024-12-01T23:43:09,232 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,42367,1733096585284 already deleted, retry=false 2024-12-01T23:43:09,232 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,42367,1733096585284 expired; onlineServers=0 2024-12-01T23:43:09,232 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '579b0f681375,46325,1733096584597' ***** 2024-12-01T23:43:09,232 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T23:43:09,233 INFO [M:0;579b0f681375:46325 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:09,233 INFO [M:0;579b0f681375:46325 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:09,233 DEBUG [M:0;579b0f681375:46325 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T23:43:09,233 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T23:43:09,233 DEBUG [M:0;579b0f681375:46325 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T23:43:09,233 DEBUG [master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733096586675 {}] cleaner.HFileCleaner(306): Exit Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733096586675,5,FailOnTimeoutGroup] 2024-12-01T23:43:09,233 DEBUG [master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733096586679 {}] cleaner.HFileCleaner(306): Exit Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733096586679,5,FailOnTimeoutGroup] 2024-12-01T23:43:09,234 INFO [M:0;579b0f681375:46325 {}] hbase.ChoreService(370): Chore service for: master/579b0f681375:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:09,234 INFO [M:0;579b0f681375:46325 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:09,235 DEBUG [M:0;579b0f681375:46325 {}] master.HMaster(1795): Stopping service threads 2024-12-01T23:43:09,235 INFO [M:0;579b0f681375:46325 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T23:43:09,235 INFO [M:0;579b0f681375:46325 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T23:43:09,236 INFO [M:0;579b0f681375:46325 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T23:43:09,237 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T23:43:09,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:09,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:09,241 DEBUG [M:0;579b0f681375:46325 {}] zookeeper.ZKUtil(347): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T23:43:09,241 WARN [M:0;579b0f681375:46325 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T23:43:09,243 INFO [M:0;579b0f681375:46325 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/.lastflushedseqids 2024-12-01T23:43:09,253 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,253 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
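The same two parity-allocation warnings recur here while the master writes the .lastflushedseqids file, which points at the identical 3-datanodes-versus-5-required shortfall for every striped write in this run (WALs, HFiles, master data) rather than a transient placement failure. Purely as an illustration of the client API (not something this test does), a directory on a 3-datanode cluster could instead be pointed at a policy it can satisfy, for example the built-in XOR-2-1-1024k policy (2 data + 1 parity = 3 nodes), reusing the dfs handle from the sketch above; the path name is hypothetical:

// Enable the built-in XOR-2-1-1024k policy on the NameNode, then apply it to a
// directory so new files under it use 2 data + 1 parity blocks.
dfs.enableErasureCodingPolicy("XOR-2-1-1024k");
dfs.setErasureCodingPolicy(new Path("/hbase-wal-ec"), "XOR-2-1-1024k");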
2024-12-01T23:43:09,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:47356 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35045:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47356 dst: /127.0.0.1:35045 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:09,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-01T23:43:09,261 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:09,261 INFO [M:0;579b0f681375:46325 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T23:43:09,261 INFO [M:0;579b0f681375:46325 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T23:43:09,261 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T23:43:09,261 INFO [M:0;579b0f681375:46325 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:09,261 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:09,261 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T23:43:09,261 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T23:43:09,262 INFO [M:0;579b0f681375:46325 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-01T23:43:09,280 DEBUG [M:0;579b0f681375:46325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fa6932ec064e4d4695e38d1e5662d06a is 82, key is hbase:meta,,1/info:regioninfo/1733096587571/Put/seqid=0 2024-12-01T23:43:09,283 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,283 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:33110 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:38241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33110 dst: /127.0.0.1:38241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:09,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-01T23:43:09,290 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T23:43:09,291 INFO [M:0;579b0f681375:46325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fa6932ec064e4d4695e38d1e5662d06a 2024-12-01T23:43:09,315 DEBUG [M:0;579b0f681375:46325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab57dab1cca748b9853d01710a922a76 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733096588347/Put/seqid=0 2024-12-01T23:43:09,318 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,318 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:33134 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:38241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33134 dst: /127.0.0.1:38241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T23:43:09,324 INFO [RS:0;579b0f681375:42367 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:09,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42367-0x1019490c5560001, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,324 INFO [RS:0;579b0f681375:42367 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,42367,1733096585284; zookeeper connection closed. 2024-12-01T23:43:09,324 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@766a6d17 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@766a6d17 2024-12-01T23:43:09,325 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T23:43:09,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775552_1037 (size=6438) 2024-12-01T23:43:09,326 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:09,326 INFO [M:0;579b0f681375:46325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab57dab1cca748b9853d01710a922a76 2024-12-01T23:43:09,350 DEBUG [M:0;579b0f681375:46325 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/da591d75253f40a0b9b968d6646b315f is 69, key is 579b0f681375,42187,1733096585405/rs:state/1733096586742/Put/seqid=0 2024-12-01T23:43:09,353 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,353 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T23:43:09,358 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1693050314_22 at /127.0.0.1:33150 [Receiving block BP-1127404106-172.17.0.2-1733096580555:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:38241:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33150 dst: /127.0.0.1:38241 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T23:43:09,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-01T23:43:09,362 WARN [M:0;579b0f681375:46325 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T23:43:09,362 INFO [M:0;579b0f681375:46325 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/da591d75253f40a0b9b968d6646b315f 2024-12-01T23:43:09,371 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fa6932ec064e4d4695e38d1e5662d06a as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fa6932ec064e4d4695e38d1e5662d06a 2024-12-01T23:43:09,379 INFO [M:0;579b0f681375:46325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fa6932ec064e4d4695e38d1e5662d06a, entries=8, sequenceid=72, filesize=5.5 K 2024-12-01T23:43:09,380 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ab57dab1cca748b9853d01710a922a76 as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab57dab1cca748b9853d01710a922a76 2024-12-01T23:43:09,388 INFO [M:0;579b0f681375:46325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ab57dab1cca748b9853d01710a922a76, entries=8, sequenceid=72, filesize=6.3 K 2024-12-01T23:43:09,390 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/da591d75253f40a0b9b968d6646b315f as hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/da591d75253f40a0b9b968d6646b315f 2024-12-01T23:43:09,399 INFO [M:0;579b0f681375:46325 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/da591d75253f40a0b9b968d6646b315f, entries=3, sequenceid=72, filesize=5.2 K 2024-12-01T23:43:09,401 INFO [M:0;579b0f681375:46325 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=72, compaction requested=false 2024-12-01T23:43:09,402 INFO [M:0;579b0f681375:46325 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:09,402 DEBUG [M:0;579b0f681375:46325 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733096589261Disabling compacts and flushes for region at 1733096589261Disabling writes for close at 1733096589261Obtaining lock to block concurrent updates at 1733096589262 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733096589262Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733096589262Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733096589263 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733096589263Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733096589280 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733096589280Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733096589299 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733096589315 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733096589315Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733096589335 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733096589350 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733096589350Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@729dbc20: reopening flushed file at 1733096589370 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@553f5bb1: reopening flushed file at 1733096589379 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a62224b: reopening flushed file at 1733096589388 (+9 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=72, compaction requested=false at 1733096589401 (+13 ms)Writing region close event to WAL at 1733096589402 (+1 ms)Closed at 1733096589402 2024-12-01T23:43:09,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_1073741825_1011 (size=32665) 2024-12-01T23:43:09,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_1073741825_1011 (size=32665) 2024-12-01T23:43:09,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_1073741825_1011 (size=32665) 2024-12-01T23:43:09,407 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T23:43:09,407 INFO [M:0;579b0f681375:46325 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-01T23:43:09,407 INFO [M:0;579b0f681375:46325 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46325 2024-12-01T23:43:09,407 INFO [M:0;579b0f681375:46325 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:09,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38241 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-01T23:43:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-01T23:43:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-01T23:43:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-01T23:43:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35045 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-01T23:43:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46667 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-01T23:43:09,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,540 INFO [M:0;579b0f681375:46325 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:09,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46325-0x1019490c5560000, quorum=127.0.0.1:60672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:09,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f750918{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:09,551 WARN [BP-1127404106-172.17.0.2-1733096580555 heartbeating to localhost/127.0.0.1:40603 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1127404106-172.17.0.2-1733096580555 (Datanode Uuid de8f40b9-0782-4848-bddd-65d491feb1e8) service to localhost/127.0.0.1:40603 2024-12-01T23:43:09,552 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@86bf2a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:09,552 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:09,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1023f385{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:09,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e1f796{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:09,553 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data5/current/BP-1127404106-172.17.0.2-1733096580555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:09,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data6/current/BP-1127404106-172.17.0.2-1733096580555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:09,556 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T23:43:09,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26b068f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:09,558 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5739b847{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:09,558 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:09,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c2c5be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:09,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a91ec1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:09,560 WARN [BP-1127404106-172.17.0.2-1733096580555 heartbeating to localhost/127.0.0.1:40603 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T23:43:09,560 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T23:43:09,560 WARN [BP-1127404106-172.17.0.2-1733096580555 heartbeating to localhost/127.0.0.1:40603 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1127404106-172.17.0.2-1733096580555 (Datanode Uuid b762d51f-4bfb-42cc-b5ba-48b4a0f2ecfa) service to localhost/127.0.0.1:40603 2024-12-01T23:43:09,560 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T23:43:09,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data3/current/BP-1127404106-172.17.0.2-1733096580555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:09,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data4/current/BP-1127404106-172.17.0.2-1733096580555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:09,561 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T23:43:09,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e705dc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:09,568 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ad1569e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:09,568 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:09,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17f1c7fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:09,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32fec40a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:09,570 WARN [BP-1127404106-172.17.0.2-1733096580555 heartbeating to localhost/127.0.0.1:40603 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T23:43:09,570 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T23:43:09,570 WARN [BP-1127404106-172.17.0.2-1733096580555 heartbeating to localhost/127.0.0.1:40603 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1127404106-172.17.0.2-1733096580555 (Datanode Uuid 9928f13c-27b3-4c1e-a9b5-bfdb63528212) service to localhost/127.0.0.1:40603 2024-12-01T23:43:09,570 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T23:43:09,570 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data1/current/BP-1127404106-172.17.0.2-1733096580555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:09,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/cluster_2055d287-0675-b6e7-86de-2206632218a2/data/data2/current/BP-1127404106-172.17.0.2-1733096580555 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:09,571 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T23:43:09,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T23:43:09,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:09,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:09,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:09,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:09,589 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-01T23:43:09,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-01T23:43:09,625 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=437 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=237 (was 205) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7263 (was 7556) 2024-12-01T23:43:09,630 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=437, MaxFileDescriptor=1048576, SystemLoadAverage=237, ProcessCount=11, AvailableMemoryMB=7263 2024-12-01T23:43:09,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.log.dir so I do NOT create it in target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/77b85d21-9450-4c50-7330-1351ad081841/hadoop.tmp.dir so I do NOT create it in target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1, deleteOnExit=true 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/test.cache.data in system properties and HBase conf 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir in system properties and HBase conf 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T23:43:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T23:43:09,632 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T23:43:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/nfs.dump.dir in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/java.io.tmpdir in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T23:43:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T23:43:09,876 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:09,880 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:09,882 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:09,882 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:09,882 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T23:43:09,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:09,883 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f7a7804{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:09,883 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f1fd8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:09,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@200f3fb2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/java.io.tmpdir/jetty-localhost-40839-hadoop-hdfs-3_4_1-tests_jar-_-any-8631276151346855227/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T23:43:09,973 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38845bbf{HTTP/1.1, (http/1.1)}{localhost:40839} 2024-12-01T23:43:09,973 INFO [Time-limited test {}] server.Server(415): Started @11207ms 2024-12-01T23:43:10,183 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:10,185 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:10,186 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:10,186 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:10,186 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T23:43:10,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b7605af{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:10,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@376a5039{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:10,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7adc0795{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/java.io.tmpdir/jetty-localhost-37253-hadoop-hdfs-3_4_1-tests_jar-_-any-2940100977509808345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:10,277 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@3887b7b1{HTTP/1.1, (http/1.1)}{localhost:37253} 2024-12-01T23:43:10,277 INFO [Time-limited test {}] server.Server(415): Started @11511ms 2024-12-01T23:43:10,279 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T23:43:10,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:10,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:10,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:10,317 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:10,317 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T23:43:10,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@538424cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:10,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60e07a4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:10,408 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73a6352c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/java.io.tmpdir/jetty-localhost-34273-hadoop-hdfs-3_4_1-tests_jar-_-any-15143994574188578764/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:10,408 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c1940b2{HTTP/1.1, (http/1.1)}{localhost:34273} 2024-12-01T23:43:10,408 INFO [Time-limited test {}] server.Server(415): Started @11642ms 2024-12-01T23:43:10,409 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T23:43:10,435 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:43:10,438 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:43:10,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:43:10,439 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:43:10,439 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T23:43:10,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19e751cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:43:10,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70ba2e51{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:43:10,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@458a3cce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/java.io.tmpdir/jetty-localhost-34443-hadoop-hdfs-3_4_1-tests_jar-_-any-13858797790329381482/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:10,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22d1a9e{HTTP/1.1, (http/1.1)}{localhost:34443} 2024-12-01T23:43:10,529 INFO [Time-limited test {}] server.Server(415): Started @11764ms 2024-12-01T23:43:10,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T23:43:10,988 WARN [Thread-553 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data2/current/BP-556674411-172.17.0.2-1733096589656/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:10,988 WARN [Thread-552 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data1/current/BP-556674411-172.17.0.2-1733096589656/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:11,005 WARN [Thread-495 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T23:43:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81f99f2bbec032ca with lease ID 0x34b6ec410e46f9e2: Processing first storage report for DS-645ece82-66ca-48c2-9451-b3d4fa5456b6 from datanode DatanodeRegistration(127.0.0.1:42661, datanodeUuid=7d4a2d10-d5a1-4bd8-b709-5deaf52cdecf, infoPort=40943, infoSecurePort=0, ipcPort=42455, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656) 2024-12-01T23:43:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81f99f2bbec032ca with lease ID 0x34b6ec410e46f9e2: from storage DS-645ece82-66ca-48c2-9451-b3d4fa5456b6 node DatanodeRegistration(127.0.0.1:42661, datanodeUuid=7d4a2d10-d5a1-4bd8-b709-5deaf52cdecf, infoPort=40943, infoSecurePort=0, ipcPort=42455, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x81f99f2bbec032ca with lease ID 0x34b6ec410e46f9e2: Processing first storage report for DS-06c4fb99-681c-43d1-aab4-ebcdc73bc9ec from datanode DatanodeRegistration(127.0.0.1:42661, datanodeUuid=7d4a2d10-d5a1-4bd8-b709-5deaf52cdecf, infoPort=40943, infoSecurePort=0, ipcPort=42455, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656) 2024-12-01T23:43:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x81f99f2bbec032ca with lease ID 0x34b6ec410e46f9e2: from storage DS-06c4fb99-681c-43d1-aab4-ebcdc73bc9ec node DatanodeRegistration(127.0.0.1:42661, datanodeUuid=7d4a2d10-d5a1-4bd8-b709-5deaf52cdecf, infoPort=40943, infoSecurePort=0, ipcPort=42455, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:11,245 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data3/current/BP-556674411-172.17.0.2-1733096589656/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:11,245 WARN [Thread-567 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data4/current/BP-556674411-172.17.0.2-1733096589656/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:11,264 WARN [Thread-518 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T23:43:11,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa77cf2b0de13930e with lease ID 0x34b6ec410e46f9e3: Processing first storage report for DS-fa178854-603d-4e90-8361-f8e01b88478a from datanode DatanodeRegistration(127.0.0.1:33147, datanodeUuid=f1545059-cd32-427a-870d-49229ae4b309, infoPort=44479, infoSecurePort=0, ipcPort=41285, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656) 2024-12-01T23:43:11,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa77cf2b0de13930e with lease ID 0x34b6ec410e46f9e3: from storage DS-fa178854-603d-4e90-8361-f8e01b88478a node DatanodeRegistration(127.0.0.1:33147, datanodeUuid=f1545059-cd32-427a-870d-49229ae4b309, infoPort=44479, infoSecurePort=0, ipcPort=41285, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:11,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa77cf2b0de13930e with lease ID 0x34b6ec410e46f9e3: Processing first storage report for DS-b385aa1c-028a-4f81-81e6-0d7fab982441 from datanode DatanodeRegistration(127.0.0.1:33147, datanodeUuid=f1545059-cd32-427a-870d-49229ae4b309, infoPort=44479, infoSecurePort=0, ipcPort=41285, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656) 2024-12-01T23:43:11,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa77cf2b0de13930e with lease ID 0x34b6ec410e46f9e3: from storage DS-b385aa1c-028a-4f81-81e6-0d7fab982441 node DatanodeRegistration(127.0.0.1:33147, datanodeUuid=f1545059-cd32-427a-870d-49229ae4b309, infoPort=44479, infoSecurePort=0, ipcPort=41285, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:11,323 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data5/current/BP-556674411-172.17.0.2-1733096589656/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:11,323 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data6/current/BP-556674411-172.17.0.2-1733096589656/current, will proceed with Du for space computation calculation, 2024-12-01T23:43:11,344 WARN [Thread-541 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T23:43:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x961c8c4f352138d7 with lease ID 0x34b6ec410e46f9e4: Processing first storage report for DS-c51e982d-7d9e-4a09-a751-dbad410aeca5 from datanode DatanodeRegistration(127.0.0.1:44777, datanodeUuid=c7621785-d443-4001-bee3-5fdcc65005c7, infoPort=33923, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656) 2024-12-01T23:43:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x961c8c4f352138d7 with lease ID 0x34b6ec410e46f9e4: from storage DS-c51e982d-7d9e-4a09-a751-dbad410aeca5 node DatanodeRegistration(127.0.0.1:44777, datanodeUuid=c7621785-d443-4001-bee3-5fdcc65005c7, infoPort=33923, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:43:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x961c8c4f352138d7 with lease ID 0x34b6ec410e46f9e4: Processing first storage report for DS-a6646a11-47ac-4319-bb80-debe29468634 from datanode DatanodeRegistration(127.0.0.1:44777, datanodeUuid=c7621785-d443-4001-bee3-5fdcc65005c7, infoPort=33923, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656) 2024-12-01T23:43:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x961c8c4f352138d7 with lease ID 0x34b6ec410e46f9e4: from storage DS-a6646a11-47ac-4319-bb80-debe29468634 node DatanodeRegistration(127.0.0.1:44777, datanodeUuid=c7621785-d443-4001-bee3-5fdcc65005c7, infoPort=33923, infoSecurePort=0, ipcPort=43499, storageInfo=lv=-57;cid=testClusterID;nsid=666110328;c=1733096589656), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T23:43:11,362 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8 2024-12-01T23:43:11,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/zookeeper_0, clientPort=55440, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T23:43:11,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55440 2024-12-01T23:43:11,366 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,367 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741825_1001 (size=7) 2024-12-01T23:43:11,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741825_1001 (size=7) 2024-12-01T23:43:11,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741825_1001 (size=7) 2024-12-01T23:43:11,382 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a with version=8 2024-12-01T23:43:11,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40603/user/jenkins/test-data/048758bd-db85-a334-0753-936c42a3140d/hbase-staging 2024-12-01T23:43:11,385 INFO [Time-limited test {}] client.ConnectionUtils(128): master/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:11,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,385 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:11,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:11,385 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-01T23:43:11,386 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:11,386 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36755 2024-12-01T23:43:11,388 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36755 connecting to ZooKeeper ensemble=127.0.0.1:55440 2024-12-01T23:43:11,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:367550x0, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:11,433 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36755-0x1019490e2c90000 connected 2024-12-01T23:43:11,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,511 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:11,515 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a, hbase.cluster.distributed=false 2024-12-01T23:43:11,518 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:11,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36755 2024-12-01T23:43:11,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36755 2024-12-01T23:43:11,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36755 2024-12-01T23:43:11,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36755 2024-12-01T23:43:11,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36755 2024-12-01T23:43:11,538 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:43:11,538 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:11,539 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37655 2024-12-01T23:43:11,540 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37655 connecting to ZooKeeper ensemble=127.0.0.1:55440 2024-12-01T23:43:11,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376550x0, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:11,557 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37655-0x1019490e2c90001 connected 2024-12-01T23:43:11,557 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:11,557 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:43:11,558 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:43:11,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:43:11,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:11,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37655 2024-12-01T23:43:11,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37655 2024-12-01T23:43:11,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37655 2024-12-01T23:43:11,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37655 2024-12-01T23:43:11,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37655 2024-12-01T23:43:11,579 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:43:11,580 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:11,581 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42565 2024-12-01T23:43:11,582 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42565 connecting to ZooKeeper ensemble=127.0.0.1:55440 2024-12-01T23:43:11,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425650x0, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:11,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42565-0x1019490e2c90002 connected 2024-12-01T23:43:11,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:11,597 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:43:11,597 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:43:11,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:43:11,600 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:11,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42565 2024-12-01T23:43:11,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42565 2024-12-01T23:43:11,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42565 2024-12-01T23:43:11,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42565 2024-12-01T23:43:11,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42565 2024-12-01T23:43:11,620 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:43:11,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,620 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:43:11,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:43:11,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:43:11,621 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:43:11,621 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:43:11,621 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34173 2024-12-01T23:43:11,623 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34173 connecting to ZooKeeper ensemble=127.0.0.1:55440 2024-12-01T23:43:11,624 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341730x0, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:43:11,637 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34173-0x1019490e2c90003 connected 2024-12-01T23:43:11,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:11,638 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:43:11,638 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:43:11,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:43:11,640 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:43:11,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34173 2024-12-01T23:43:11,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34173 2024-12-01T23:43:11,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34173 2024-12-01T23:43:11,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34173 2024-12-01T23:43:11,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34173 2024-12-01T23:43:11,655 DEBUG [M:0;579b0f681375:36755 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;579b0f681375:36755 2024-12-01T23:43:11,656 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/579b0f681375,36755,1733096591385 2024-12-01T23:43:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,665 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/579b0f681375,36755,1733096591385 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,674 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T23:43:11,674 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/579b0f681375,36755,1733096591385 from backup master directory 2024-12-01T23:43:11,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/579b0f681375,36755,1733096591385 2024-12-01T23:43:11,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:43:11,681 WARN [master/579b0f681375:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
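[editor's note, not part of the captured log] The preceding records show every ZooKeeper session receiving NodeCreated/NodeChildrenChanged events on /hbase/master and /hbase/backup-masters while the master registers itself. As a rough illustration of the underlying pattern only (not HBase's ActiveMasterManager), a process can advertise itself by creating an ephemeral znode with the plain ZooKeeper client; the znode vanishes when the session dies, which is what lets other candidates react to a crashed leader. The ensemble address below is copied from this log, and /demo/active-master is a made-up placeholder path.

import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
    public static void main(String[] args) throws Exception {
        String ensemble = "127.0.0.1:55440"; // quorum address seen in this log; placeholder here
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper(ensemble, 30_000, event -> {
            // The session is usable once the SyncConnected event arrives.
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        // Ensure the (persistent) parent exists before creating the ephemeral child.
        if (zk.exists("/demo", false) == null) {
            zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        // Ephemeral znode: removed automatically when this session closes or expires.
        String path = zk.create("/demo/active-master",
            "host:port".getBytes(StandardCharsets.UTF_8),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        System.out.println("registered at " + path);
        zk.close();
    }
}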
2024-12-01T23:43:11,681 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=579b0f681375,36755,1733096591385 2024-12-01T23:43:11,686 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/hbase.id] with ID: a4c341b4-6f38-473a-ac8e-c4910f853b87 2024-12-01T23:43:11,686 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/.tmp/hbase.id 2024-12-01T23:43:11,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741826_1002 (size=42) 2024-12-01T23:43:11,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741826_1002 (size=42) 2024-12-01T23:43:11,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741826_1002 (size=42) 2024-12-01T23:43:11,697 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/.tmp/hbase.id]:[hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/hbase.id] 2024-12-01T23:43:11,713 INFO [master/579b0f681375:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:43:11,713 INFO [master/579b0f681375:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T23:43:11,714 INFO [master/579b0f681375:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
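[editor's note, not part of the captured log] The cluster ID records above follow a write-to-temp-then-rename sequence: hbase.id is first written under .tmp and then moved to its final name, so readers never observe a half-written file. Below is a minimal sketch of that pattern with the Hadoop FileSystem API, under the assumption of the hdfs://localhost:39367 namenode URI printed in this log; the /user/jenkins/demo paths are placeholders, and the UUID is simply the one logged above.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39367"), conf);
        Path tmp = new Path("/user/jenkins/demo/.tmp/hbase.id"); // staging location (placeholder)
        Path dst = new Path("/user/jenkins/demo/hbase.id");      // final, published location (placeholder)
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("a4c341b4-6f38-473a-ac8e-c4910f853b87".getBytes(StandardCharsets.UTF_8));
        }
        // HDFS rename is atomic within a namespace, so the file appears at the
        // final path either fully written or not at all.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
    }
}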
2024-12-01T23:43:11,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741827_1003 (size=196) 2024-12-01T23:43:11,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741827_1003 (size=196) 2024-12-01T23:43:11,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741827_1003 (size=196) 2024-12-01T23:43:11,734 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:43:11,734 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T23:43:11,735 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T23:43:11,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is 
added to blk_1073741828_1004 (size=1189) 2024-12-01T23:43:11,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741828_1004 (size=1189) 2024-12-01T23:43:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741828_1004 (size=1189) 2024-12-01T23:43:11,748 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store 2024-12-01T23:43:11,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741829_1005 (size=34) 2024-12-01T23:43:11,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741829_1005 (size=34) 2024-12-01T23:43:11,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741829_1005 (size=34) 2024-12-01T23:43:11,757 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:11,758 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T23:43:11,758 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:11,758 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
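[editor's note, not part of the captured log] The master:store schema dumped above (column families info, proc, rs and state with their VERSIONS, IN_MEMORY, BLOCKSIZE, bloom filter and encoding attributes) is the kind of descriptor the HBase client API assembles with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. The sketch below reconstructs only the 'info' family from the attributes printed in the log; the table name is a placeholder, and this is illustrative rather than the code HBase itself runs to create the master local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
    public static TableDescriptor build() {
        // Attributes mirror the 'info' family shown in the log:
        // VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL,
        // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8 KB.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store")) // placeholder name, not the real master:store
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            .build();
    }
}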
2024-12-01T23:43:11,758 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T23:43:11,758 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:11,758 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:11,758 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733096591758Disabling compacts and flushes for region at 1733096591758Disabling writes for close at 1733096591758Writing region close event to WAL at 1733096591758Closed at 1733096591758 2024-12-01T23:43:11,759 WARN [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/.initializing 2024-12-01T23:43:11,760 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/WALs/579b0f681375,36755,1733096591385 2024-12-01T23:43:11,764 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C36755%2C1733096591385, suffix=, logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/WALs/579b0f681375,36755,1733096591385, archiveDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/oldWALs, maxLogs=10 2024-12-01T23:43:11,765 INFO [master/579b0f681375:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 579b0f681375%2C36755%2C1733096591385.1733096591764 2024-12-01T23:43:11,775 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/WALs/579b0f681375,36755,1733096591385/579b0f681375%2C36755%2C1733096591385.1733096591764 2024-12-01T23:43:11,777 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40943:40943),(127.0.0.1/127.0.0.1:33923:33923),(127.0.0.1/127.0.0.1:44479:44479)] 2024-12-01T23:43:11,778 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:43:11,778 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:11,778 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,778 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T23:43:11,781 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:11,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T23:43:11,784 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:11,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T23:43:11,786 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:11,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T23:43:11,789 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:11,790 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,790 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,791 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,793 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,793 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,794 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T23:43:11,795 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:43:11,798 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:43:11,798 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62045406, jitterRate=-0.07545140385627747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:43:11,799 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733096591778Initializing all the Stores at 1733096591779 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096591779Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096591780 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096591780Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096591780Cleaning up temporary data from old regions at 1733096591793 (+13 ms)Region opened successfully at 1733096591799 (+6 ms) 2024-12-01T23:43:11,799 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T23:43:11,803 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142c34a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:11,804 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-01T23:43:11,804 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T23:43:11,804 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T23:43:11,805 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T23:43:11,805 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T23:43:11,806 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-01T23:43:11,806 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T23:43:11,808 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
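The FlushLargeStoresPolicy(65) and HRegion(1114) entries above fit together arithmetically: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the policy falls back to the memstore flush size divided by the number of column families, and master:store has four families (info, proc, rs, state). A tiny sketch of that calculation, using only values copied from the log:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Values copied from the log: flushSize=134217728 bytes (128 MB), 4 families.
    long memStoreFlushSize = 134_217_728L;
    int families = 4;

    // Fallback used when the per-column-family lower bound is not configured.
    long flushSizeLowerBound = memStoreFlushSize / families;

    // Prints 33554432 (32 MB), matching both the "(32.0 M)" note and
    // FlushLargeStoresPolicy{flushSizeLowerBound=33554432} above.
    System.out.println(flushSizeLowerBound);
  }
}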
2024-12-01T23:43:11,809 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T23:43:11,823 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T23:43:11,823 INFO [master/579b0f681375:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T23:43:11,824 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T23:43:11,831 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T23:43:11,831 INFO [master/579b0f681375:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T23:43:11,832 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T23:43:11,839 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T23:43:11,840 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T23:43:11,848 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T23:43:11,850 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T23:43:11,856 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,866 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=579b0f681375,36755,1733096591385, sessionid=0x1019490e2c90000, setting cluster-up flag (Was=false) 2024-12-01T23:43:11,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,906 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T23:43:11,908 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=579b0f681375,36755,1733096591385 2024-12-01T23:43:11,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:11,948 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T23:43:11,951 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=579b0f681375,36755,1733096591385 2024-12-01T23:43:11,954 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T23:43:11,959 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:11,959 INFO [master/579b0f681375:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T23:43:11,959 INFO [master/579b0f681375:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-01T23:43:11,959 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 579b0f681375,36755,1733096591385 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/579b0f681375:0, corePoolSize=10, maxPoolSize=10 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:11,961 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:11,962 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733096621962 2024-12-01T23:43:11,962 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T23:43:11,962 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T23:43:11,963 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T23:43:11,964 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:11,964 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T23:43:11,964 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T23:43:11,964 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T23:43:11,964 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733096591964,5,FailOnTimeoutGroup] 2024-12-01T23:43:11,965 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733096591965,5,FailOnTimeoutGroup] 2024-12-01T23:43:11,965 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:11,965 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T23:43:11,965 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:11,965 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
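The CleanerChore(192) entries above list the log and HFile cleaner delegates the master initialized (TimeToLiveLogCleaner, ReplicationLogCleaner, SnapshotHFileCleaner, and so on). As a hedged aside, these delegate lists are normally driven by plugin-list configuration keys; the sketch below only reads those keys back from a default client configuration and is not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerPluginsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: these are the standard plugin-list keys behind the
    // "Initialize cleaner=..." entries; their defaults include the
    // TimeToLive*Cleaner classes named in the log.
    System.out.println(conf.get("hbase.master.logcleaner.plugins"));
    System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
  }
}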
2024-12-01T23:43:11,965 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,965 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T23:43:11,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741831_1007 (size=1321) 2024-12-01T23:43:11,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741831_1007 (size=1321) 2024-12-01T23:43:11,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741831_1007 (size=1321) 2024-12-01T23:43:11,977 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T23:43:11,977 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a 2024-12-01T23:43:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741832_1008 (size=32) 2024-12-01T23:43:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741832_1008 (size=32) 2024-12-01T23:43:11,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741832_1008 (size=32) 2024-12-01T23:43:11,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:11,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T23:43:11,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T23:43:11,992 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:11,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T23:43:11,995 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T23:43:11,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:11,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T23:43:11,998 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T23:43:11,998 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:11,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:11,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T23:43:12,001 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T23:43:12,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:12,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:12,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T23:43:12,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740 2024-12-01T23:43:12,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740 2024-12-01T23:43:12,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T23:43:12,005 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T23:43:12,006 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T23:43:12,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T23:43:12,010 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:43:12,011 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73009279, jitterRate=0.08792303502559662}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:43:12,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733096591988Initializing all the Stores at 1733096591989 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096591989Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096591989Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096591990 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096591990Cleaning up temporary data from old regions at 1733096592005 (+15 ms)Region opened successfully at 1733096592012 (+7 ms) 2024-12-01T23:43:12,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T23:43:12,012 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T23:43:12,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T23:43:12,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T23:43:12,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T23:43:12,013 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T23:43:12,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733096592012Disabling compacts and flushes for region at 1733096592012Disabling writes for close at 1733096592012Writing region close event to WAL at 1733096592013 (+1 ms)Closed at 1733096592013 2024-12-01T23:43:12,015 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:12,015 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T23:43:12,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T23:43:12,017 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T23:43:12,019 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T23:43:12,044 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(746): ClusterId : a4c341b4-6f38-473a-ac8e-c4910f853b87 2024-12-01T23:43:12,044 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(746): ClusterId : a4c341b4-6f38-473a-ac8e-c4910f853b87 2024-12-01T23:43:12,044 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(746): ClusterId : a4c341b4-6f38-473a-ac8e-c4910f853b87 2024-12-01T23:43:12,044 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
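The repeated compactions.CompactionConfiguration(183) entries earlier in this section record the effective compaction tuning for each store (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). As an illustration only, the sketch below reads the configuration keys these values typically come from; the key names are standard HBase settings, but treating them as the exact source of the logged numbers is an assumption, and the second argument to each getter is just a fallback default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int minFiles  = conf.getInt("hbase.hstore.compaction.min", 3);             // "minFilesToCompact:3"
    int maxFiles  = conf.getInt("hbase.hstore.compaction.max", 10);            // "maxFilesToCompact:10"
    float ratio   = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);      // "ratio 1.200000"
    float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // "off-peak ratio 5.000000"
    System.out.printf("min=%d max=%d ratio=%.1f offPeak=%.1f%n",
        minFiles, maxFiles, ratio, offPeak);
  }
}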
2024-12-01T23:43:12,044 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:43:12,044 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:43:12,081 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:43:12,081 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:43:12,081 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:43:12,081 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:43:12,081 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:43:12,081 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:43:12,092 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:43:12,092 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:43:12,092 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:43:12,093 DEBUG [RS:2;579b0f681375:34173 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@234c21d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:12,093 DEBUG [RS:1;579b0f681375:42565 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7649fe68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:12,094 DEBUG [RS:0;579b0f681375:37655 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@704c3a07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:43:12,106 DEBUG [RS:2;579b0f681375:34173 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;579b0f681375:34173 2024-12-01T23:43:12,106 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;579b0f681375:42565 2024-12-01T23:43:12,106 DEBUG [RS:0;579b0f681375:37655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;579b0f681375:37655 2024-12-01T23:43:12,106 INFO [RS:1;579b0f681375:42565 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:43:12,106 INFO [RS:2;579b0f681375:34173 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:43:12,106 INFO [RS:0;579b0f681375:37655 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading 
is enabled 2024-12-01T23:43:12,106 INFO [RS:2;579b0f681375:34173 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:43:12,106 INFO [RS:1;579b0f681375:42565 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:43:12,106 INFO [RS:0;579b0f681375:37655 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:43:12,106 DEBUG [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T23:43:12,106 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T23:43:12,107 DEBUG [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T23:43:12,107 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,36755,1733096591385 with port=34173, startcode=1733096591620 2024-12-01T23:43:12,107 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,36755,1733096591385 with port=37655, startcode=1733096591537 2024-12-01T23:43:12,107 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,36755,1733096591385 with port=42565, startcode=1733096591579 2024-12-01T23:43:12,108 DEBUG [RS:0;579b0f681375:37655 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:43:12,108 DEBUG [RS:2;579b0f681375:34173 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:43:12,108 DEBUG [RS:1;579b0f681375:42565 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:43:12,110 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47113, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:43:12,110 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57291, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:43:12,110 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45731, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:43:12,111 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36755 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,42565,1733096591579 2024-12-01T23:43:12,111 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36755 {}] master.ServerManager(517): Registering regionserver=579b0f681375,42565,1733096591579 2024-12-01T23:43:12,114 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36755 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,34173,1733096591620 2024-12-01T23:43:12,114 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a 2024-12-01T23:43:12,114 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36755 {}] master.ServerManager(517): Registering regionserver=579b0f681375,34173,1733096591620 
2024-12-01T23:43:12,114 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39367 2024-12-01T23:43:12,114 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:43:12,116 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36755 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,37655,1733096591537 2024-12-01T23:43:12,116 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36755 {}] master.ServerManager(517): Registering regionserver=579b0f681375,37655,1733096591537 2024-12-01T23:43:12,116 DEBUG [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a 2024-12-01T23:43:12,116 DEBUG [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39367 2024-12-01T23:43:12,116 DEBUG [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:43:12,119 DEBUG [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a 2024-12-01T23:43:12,119 DEBUG [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39367 2024-12-01T23:43:12,119 DEBUG [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:43:12,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:12,154 DEBUG [RS:1;579b0f681375:42565 {}] zookeeper.ZKUtil(111): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,42565,1733096591579 2024-12-01T23:43:12,154 WARN [RS:1;579b0f681375:42565 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T23:43:12,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,37655,1733096591537] 2024-12-01T23:43:12,154 DEBUG [RS:0;579b0f681375:37655 {}] zookeeper.ZKUtil(111): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,37655,1733096591537 2024-12-01T23:43:12,154 INFO [RS:1;579b0f681375:42565 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T23:43:12,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,42565,1733096591579] 2024-12-01T23:43:12,154 WARN [RS:0;579b0f681375:37655 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
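The zookeeper.ZKUtil and ZKWatcher entries above and earlier in this section (watchers set on /hbase/rs/... znodes, NodeCreated and NodeChildrenChanged events, DEBUG-level "node does not exist" probes) all sit on top of ordinary ZooKeeper watch semantics. A self-contained sketch using the plain ZooKeeper client, with the quorum string copied from the log; this is not HBase's internal helper, just the underlying API it wraps.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // The watcher just prints events, analogous to the ZKWatcher(609) lines above.
    Watcher watcher = (WatchedEvent e) -> System.out.println("event: " + e);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55440", 30_000, watcher);

    // exists() returns null for a missing znode rather than throwing, which is
    // why probing an absent node is routinely logged at DEBUG, not as an error.
    Stat stat = zk.exists("/hbase/rs", true); // true = re-register the default watcher
    System.out.println(stat == null ? "znode absent" : "znode present");
    zk.close();
  }
}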
2024-12-01T23:43:12,154 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,34173,1733096591620] 2024-12-01T23:43:12,154 DEBUG [RS:2;579b0f681375:34173 {}] zookeeper.ZKUtil(111): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,34173,1733096591620 2024-12-01T23:43:12,154 INFO [RS:0;579b0f681375:37655 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T23:43:12,154 WARN [RS:2;579b0f681375:34173 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T23:43:12,154 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,42565,1733096591579 2024-12-01T23:43:12,155 DEBUG [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,37655,1733096591537 2024-12-01T23:43:12,155 INFO [RS:2;579b0f681375:34173 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T23:43:12,155 DEBUG [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,34173,1733096591620 2024-12-01T23:43:12,161 INFO [RS:1;579b0f681375:42565 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:43:12,161 INFO [RS:0;579b0f681375:37655 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:43:12,165 INFO [RS:2;579b0f681375:34173 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:43:12,166 INFO [RS:1;579b0f681375:42565 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:43:12,167 INFO [RS:0;579b0f681375:37655 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:43:12,167 INFO [RS:1;579b0f681375:42565 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:43:12,167 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,168 INFO [RS:0;579b0f681375:37655 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:43:12,168 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,169 WARN [579b0f681375:36755 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-01T23:43:12,170 INFO [RS:2;579b0f681375:34173 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:43:12,174 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:43:12,174 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:43:12,174 INFO [RS:2;579b0f681375:34173 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:43:12,174 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,175 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:43:12,175 INFO [RS:0;579b0f681375:37655 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:43:12,175 INFO [RS:1;579b0f681375:42565 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:43:12,175 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,175 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,175 INFO [RS:2;579b0f681375:34173 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:43:12,175 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,175 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
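
The MemStoreFlusher and PressureAwareCompactionThroughputController entries above report the resolved limits for each region server (globalMemStoreLimit=880 M with an 836 M low-water mark, and compaction throughput bounded between 50 and 100 MB/second). A minimal Java sketch of how such limits could be set through configuration follows; the property names and fractions are assumptions based on common HBase settings, not values echoed in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Global memstore limit as a fraction of region server heap; the low-water
            // mark is a fraction of that limit (0.95 * 880 M is roughly the 836 M above).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Pressure-aware compaction throughput bounds, in bytes per second.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println("memstore fraction = "
                + conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
        }
    }
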
2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 
2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,176 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,177 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,177 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,177 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,177 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:12,177 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): 
Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:43:12,177 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:12,177 DEBUG [RS:0;579b0f681375:37655 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:12,177 DEBUG [RS:1;579b0f681375:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:12,177 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:12,177 DEBUG [RS:2;579b0f681375:34173 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:43:12,181 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,181 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
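
The executor.ExecutorService entries above show each region server starting its handler pools (RS_OPEN_REGION, RS_CLOSE_REGION, and so on), mostly with core/max pool sizes of 1 on this single-host minicluster. A short sketch of raising two of those pool sizes is below; both property names are assumptions about the standard executor-thread settings and do not appear in the log itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExecutorPoolSizeSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys for the RS_OPEN_REGION / RS_CLOSE_REGION handler pool sizes.
            conf.setInt("hbase.regionserver.executor.openregion.threads", 3);
            conf.setInt("hbase.regionserver.executor.closeregion.threads", 3);
            System.out.println("open-region threads = "
                + conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
        }
    }
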
2024-12-01T23:43:12,182 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,42565,1733096591579-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,37655,1733096591537-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,182 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,34173,1733096591620-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:43:12,199 INFO [RS:2;579b0f681375:34173 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:43:12,199 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,34173,1733096591620-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,200 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,200 INFO [RS:2;579b0f681375:34173 {}] regionserver.Replication(171): 579b0f681375,34173,1733096591620 started 2024-12-01T23:43:12,200 INFO [RS:1;579b0f681375:42565 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:43:12,200 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,42565,1733096591579-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,201 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,201 INFO [RS:1;579b0f681375:42565 {}] regionserver.Replication(171): 579b0f681375,42565,1733096591579 started 2024-12-01T23:43:12,205 INFO [RS:0;579b0f681375:37655 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:43:12,206 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,37655,1733096591537-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,206 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,206 INFO [RS:0;579b0f681375:37655 {}] regionserver.Replication(171): 579b0f681375,37655,1733096591537 started 2024-12-01T23:43:12,213 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:12,213 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,34173,1733096591620, RpcServer on 579b0f681375/172.17.0.2:34173, sessionid=0x1019490e2c90003 2024-12-01T23:43:12,213 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:43:12,213 DEBUG [RS:2;579b0f681375:34173 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,34173,1733096591620 2024-12-01T23:43:12,213 DEBUG [RS:2;579b0f681375:34173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,34173,1733096591620' 2024-12-01T23:43:12,213 DEBUG [RS:2;579b0f681375:34173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:43:12,214 DEBUG [RS:2;579b0f681375:34173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:43:12,215 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:43:12,215 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:43:12,215 DEBUG [RS:2;579b0f681375:34173 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,34173,1733096591620 2024-12-01T23:43:12,215 DEBUG [RS:2;579b0f681375:34173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,34173,1733096591620' 2024-12-01T23:43:12,215 DEBUG [RS:2;579b0f681375:34173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:43:12,215 DEBUG [RS:2;579b0f681375:34173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:43:12,216 DEBUG [RS:2;579b0f681375:34173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:43:12,216 INFO [RS:2;579b0f681375:34173 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T23:43:12,216 INFO [RS:2;579b0f681375:34173 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T23:43:12,220 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:12,220 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,42565,1733096591579, RpcServer on 579b0f681375/172.17.0.2:42565, sessionid=0x1019490e2c90002 2024-12-01T23:43:12,221 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:43:12,221 DEBUG [RS:1;579b0f681375:42565 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,42565,1733096591579 2024-12-01T23:43:12,221 DEBUG [RS:1;579b0f681375:42565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,42565,1733096591579' 2024-12-01T23:43:12,221 DEBUG [RS:1;579b0f681375:42565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:43:12,221 DEBUG [RS:1;579b0f681375:42565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:43:12,222 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:43:12,222 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:43:12,222 DEBUG [RS:1;579b0f681375:42565 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,42565,1733096591579 2024-12-01T23:43:12,222 DEBUG [RS:1;579b0f681375:42565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,42565,1733096591579' 2024-12-01T23:43:12,222 DEBUG [RS:1;579b0f681375:42565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:43:12,223 DEBUG [RS:1;579b0f681375:42565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:43:12,223 DEBUG [RS:1;579b0f681375:42565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:43:12,223 INFO [RS:1;579b0f681375:42565 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T23:43:12,223 INFO [RS:1;579b0f681375:42565 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T23:43:12,225 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:43:12,225 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,37655,1733096591537, RpcServer on 579b0f681375/172.17.0.2:37655, sessionid=0x1019490e2c90001 2024-12-01T23:43:12,225 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:43:12,225 DEBUG [RS:0;579b0f681375:37655 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,37655,1733096591537 2024-12-01T23:43:12,225 DEBUG [RS:0;579b0f681375:37655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,37655,1733096591537' 2024-12-01T23:43:12,225 DEBUG [RS:0;579b0f681375:37655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:43:12,225 DEBUG [RS:0;579b0f681375:37655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:43:12,226 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:43:12,226 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:43:12,226 DEBUG [RS:0;579b0f681375:37655 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,37655,1733096591537 2024-12-01T23:43:12,226 DEBUG [RS:0;579b0f681375:37655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,37655,1733096591537' 2024-12-01T23:43:12,226 DEBUG [RS:0;579b0f681375:37655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:43:12,226 DEBUG [RS:0;579b0f681375:37655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:43:12,227 DEBUG [RS:0;579b0f681375:37655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:43:12,227 INFO [RS:0;579b0f681375:37655 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T23:43:12,227 INFO [RS:0;579b0f681375:37655 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
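
All three region servers report "Quota support disabled" above, so neither the RPC quota manager nor the space quota manager is started. A one-property sketch, assuming the usual hbase.quota.enabled switch, of how quota support would be turned on:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaToggleSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Quotas default to off, which is why the managers above decline to start.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println("quotas enabled = " + conf.getBoolean("hbase.quota.enabled", false));
        }
    }
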
2024-12-01T23:43:12,319 INFO [RS:2;579b0f681375:34173 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C34173%2C1733096591620, suffix=, logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,34173,1733096591620, archiveDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs, maxLogs=32 2024-12-01T23:43:12,320 INFO [RS:2;579b0f681375:34173 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 579b0f681375%2C34173%2C1733096591620.1733096592320 2024-12-01T23:43:12,325 INFO [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C42565%2C1733096591579, suffix=, logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,42565,1733096591579, archiveDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs, maxLogs=32 2024-12-01T23:43:12,326 INFO [RS:1;579b0f681375:42565 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 579b0f681375%2C42565%2C1733096591579.1733096592326 2024-12-01T23:43:12,329 INFO [RS:2;579b0f681375:34173 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,34173,1733096591620/579b0f681375%2C34173%2C1733096591620.1733096592320 2024-12-01T23:43:12,329 INFO [RS:0;579b0f681375:37655 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C37655%2C1733096591537, suffix=, logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,37655,1733096591537, archiveDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs, maxLogs=32 2024-12-01T23:43:12,330 INFO [RS:0;579b0f681375:37655 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 579b0f681375%2C37655%2C1733096591537.1733096592330 2024-12-01T23:43:12,333 DEBUG [RS:2;579b0f681375:34173 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40943:40943),(127.0.0.1/127.0.0.1:33923:33923),(127.0.0.1/127.0.0.1:44479:44479)] 2024-12-01T23:43:12,337 INFO [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,42565,1733096591579/579b0f681375%2C42565%2C1733096591579.1733096592326 2024-12-01T23:43:12,340 DEBUG [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44479:44479),(127.0.0.1/127.0.0.1:33923:33923),(127.0.0.1/127.0.0.1:40943:40943)] 2024-12-01T23:43:12,343 INFO [RS:0;579b0f681375:37655 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,37655,1733096591537/579b0f681375%2C37655%2C1733096591537.1733096592330 2024-12-01T23:43:12,346 DEBUG [RS:0;579b0f681375:37655 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44479:44479),(127.0.0.1/127.0.0.1:40943:40943),(127.0.0.1/127.0.0.1:33923:33923)] 2024-12-01T23:43:12,419 DEBUG [579b0f681375:36755 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T23:43:12,420 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:43:12,425 DEBUG [579b0f681375:36755 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:43:12,425 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:43:12,425 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T23:43:12,425 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:43:12,425 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:43:12,425 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T23:43:12,425 INFO [579b0f681375:36755 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:43:12,426 INFO [579b0f681375:36755 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:43:12,426 INFO [579b0f681375:36755 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T23:43:12,426 DEBUG [579b0f681375:36755 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:43:12,426 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=579b0f681375,42565,1733096591579 2024-12-01T23:43:12,430 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 579b0f681375,42565,1733096591579, state=OPENING 2024-12-01T23:43:12,445 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T23:43:12,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:12,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:12,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:12,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:12,457 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T23:43:12,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=579b0f681375,42565,1733096591579}] 2024-12-01T23:43:12,458 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,458 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,614 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T23:43:12,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55225, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T23:43:12,626 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T23:43:12,627 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T23:43:12,630 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C42565%2C1733096591579.meta, suffix=.meta, logDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,42565,1733096591579, archiveDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs, maxLogs=32 2024-12-01T23:43:12,631 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 579b0f681375%2C42565%2C1733096591579.meta.1733096592631.meta 2024-12-01T23:43:12,641 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/WALs/579b0f681375,42565,1733096591579/579b0f681375%2C42565%2C1733096591579.meta.1733096592631.meta 2024-12-01T23:43:12,642 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40943:40943),(127.0.0.1/127.0.0.1:44479:44479),(127.0.0.1/127.0.0.1:33923:33923)] 2024-12-01T23:43:12,643 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:43:12,643 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T23:43:12,644 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T23:43:12,644 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
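
The wal.AbstractFSWAL entries above (for both the per-server WALs and the hbase:meta WAL) report blocksize=256 MB, rollsize=128 MB and maxLogs=32, and each server instantiates FSHLogProvider. A sketch of configuration that would yield those numbers follows; the property names are assumptions based on the common WAL settings rather than values echoed in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSettingsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "filesystem" selects FSHLogProvider, the provider named in the entries above.
            conf.set("hbase.wal.provider", "filesystem");
            // Roll size is blocksize * multiplier: 256 MB * 0.5 = 128 MB.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Cap on the number of WAL files before flushes are forced ("maxLogs=32").
            conf.setInt("hbase.regionserver.maxlogs", 32);
        }
    }
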
2024-12-01T23:43:12,644 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T23:43:12,644 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:12,644 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T23:43:12,644 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T23:43:12,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T23:43:12,647 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T23:43:12,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:12,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:12,648 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T23:43:12,650 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T23:43:12,650 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:12,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:12,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T23:43:12,652 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T23:43:12,652 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:12,652 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:43:12,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T23:43:12,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T23:43:12,654 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:12,654 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-01T23:43:12,655 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T23:43:12,656 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740 2024-12-01T23:43:12,657 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740 2024-12-01T23:43:12,659 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T23:43:12,659 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T23:43:12,660 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T23:43:12,661 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T23:43:12,662 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73555395, jitterRate=0.09606079757213593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:43:12,662 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T23:43:12,664 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733096592644Writing region info on filesystem at 1733096592644Initializing all the Stores at 1733096592646 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096592646Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096592646Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096592646Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733096592646Cleaning up temporary data from old regions at 1733096592659 (+13 ms)Running coprocessor post-open hooks at 1733096592663 (+4 ms)Region opened successfully at 1733096592663 2024-12-01T23:43:12,665 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733096592613 2024-12-01T23:43:12,668 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T23:43:12,668 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T23:43:12,669 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,42565,1733096591579 2024-12-01T23:43:12,671 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 579b0f681375,42565,1733096591579, state=OPEN 2024-12-01T23:43:12,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:12,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:12,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:12,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:43:12,687 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=579b0f681375,42565,1733096591579 2024-12-01T23:43:12,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,687 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,687 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:43:12,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T23:43:12,694 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=579b0f681375,42565,1733096591579 in 230 msec 2024-12-01T23:43:12,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T23:43:12,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 679 msec 2024-12-01T23:43:12,699 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:43:12,699 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T23:43:12,701 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:43:12,701 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=579b0f681375,42565,1733096591579, seqNum=-1] 2024-12-01T23:43:12,701 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:43:12,703 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38605, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:43:12,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 755 msec 2024-12-01T23:43:12,713 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733096592713, completionTime=-1 2024-12-01T23:43:12,713 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T23:43:12,713 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-01T23:43:12,717 INFO [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T23:43:12,717 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733096652717 2024-12-01T23:43:12,717 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733096712717 2024-12-01T23:43:12,718 INFO [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-12-01T23:43:12,718 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,36755,1733096591385-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,719 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,36755,1733096591385-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,719 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,36755,1733096591385-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,719 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-579b0f681375:36755, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,719 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,719 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,723 DEBUG [master/579b0f681375:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.045sec 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,36755,1733096591385-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
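
Once initialization completes, the master schedules its own chores above (BalancerChore and RegionNormalizerChore every 300000 ms, ClusterStatusChore every 60000 ms, and others). If the balancer interval needed changing, a sketch such as the following would apply; hbase.balancer.period is the assumed key for that interval and is not named in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterChorePeriodSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key for the balancer chore interval, in milliseconds
            // (300000 ms matches the BalancerChore period reported above).
            conf.setInt("hbase.balancer.period", 300000);
            System.out.println("balancer period = " + conf.getInt("hbase.balancer.period", 300000));
        }
    }
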
2024-12-01T23:43:12,726 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,36755,1733096591385-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T23:43:12,729 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T23:43:12,729 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T23:43:12,730 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,36755,1733096591385-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:43:12,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@170c6336, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:43:12,744 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 579b0f681375,36755,-1 for getting cluster id 2024-12-01T23:43:12,744 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T23:43:12,746 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a4c341b4-6f38-473a-ac8e-c4910f853b87' 2024-12-01T23:43:12,746 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T23:43:12,747 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a4c341b4-6f38-473a-ac8e-c4910f853b87" 2024-12-01T23:43:12,747 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e30ce90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:43:12,747 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [579b0f681375,36755,-1] 2024-12-01T23:43:12,747 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T23:43:12,748 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:12,749 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T23:43:12,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@376b00bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:43:12,751 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:43:12,752 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=579b0f681375,42565,1733096591579, seqNum=-1] 2024-12-01T23:43:12,753 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:43:12,755 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:43:12,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=579b0f681375,36755,1733096591385 2024-12-01T23:43:12,758 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T23:43:12,760 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 579b0f681375,36755,1733096591385 2024-12-01T23:43:12,760 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10cbbd78 2024-12-01T23:43:12,760 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T23:43:12,762 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60724, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T23:43:12,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:43:12,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-01T23:43:12,767 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:43:12,767 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:12,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-01T23:43:12,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:12,769 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:43:12,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741837_1013 (size=392) 
2024-12-01T23:43:12,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741837_1013 (size=392) 2024-12-01T23:43:12,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741837_1013 (size=392) 2024-12-01T23:43:12,782 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4c1dae718a04b6be4da87fc0e0181d8c, NAME => 'TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a 2024-12-01T23:43:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741838_1014 (size=51) 2024-12-01T23:43:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741838_1014 (size=51) 2024-12-01T23:43:12,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741838_1014 (size=51) 2024-12-01T23:43:12,794 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:12,794 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4c1dae718a04b6be4da87fc0e0181d8c, disabling compactions & flushes 2024-12-01T23:43:12,794 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:12,794 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:12,794 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. after waiting 0 ms 2024-12-01T23:43:12,794 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:12,794 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:12,794 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4c1dae718a04b6be4da87fc0e0181d8c: Waiting for close lock at 1733096592794Disabling compacts and flushes for region at 1733096592794Disabling writes for close at 1733096592794Writing region close event to WAL at 1733096592794Closed at 1733096592794 2024-12-01T23:43:12,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:43:12,797 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733096592796"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733096592796"}]},"ts":"1733096592796"} 2024-12-01T23:43:12,800 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-01T23:43:12,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:43:12,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733096592802"}]},"ts":"1733096592802"} 2024-12-01T23:43:12,806 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-01T23:43:12,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:43:12,807 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:43:12,807 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:43:12,807 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T23:43:12,808 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:43:12,808 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:43:12,808 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T23:43:12,808 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:43:12,808 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:43:12,808 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T23:43:12,808 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:43:12,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c1dae718a04b6be4da87fc0e0181d8c, ASSIGN}] 2024-12-01T23:43:12,810 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c1dae718a04b6be4da87fc0e0181d8c, ASSIGN 2024-12-01T23:43:12,812 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c1dae718a04b6be4da87fc0e0181d8c, ASSIGN; state=OFFLINE, location=579b0f681375,42565,1733096591579; forceNewPlan=false, retain=false 2024-12-01T23:43:12,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:12,962 INFO [579b0f681375:36755 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T23:43:12,963 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c1dae718a04b6be4da87fc0e0181d8c, regionState=OPENING, regionLocation=579b0f681375,42565,1733096591579 2024-12-01T23:43:12,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c1dae718a04b6be4da87fc0e0181d8c, ASSIGN because future has completed 2024-12-01T23:43:12,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c1dae718a04b6be4da87fc0e0181d8c, server=579b0f681375,42565,1733096591579}] 2024-12-01T23:43:13,041 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T23:43:13,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T23:43:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:13,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T23:43:13,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T23:43:13,127 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:13,127 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c1dae718a04b6be4da87fc0e0181d8c, NAME => 'TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:43:13,127 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,127 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:43:13,127 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,128 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,129 INFO [StoreOpener-4c1dae718a04b6be4da87fc0e0181d8c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,131 INFO [StoreOpener-4c1dae718a04b6be4da87fc0e0181d8c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c1dae718a04b6be4da87fc0e0181d8c columnFamilyName cf 2024-12-01T23:43:13,131 DEBUG [StoreOpener-4c1dae718a04b6be4da87fc0e0181d8c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:43:13,132 INFO [StoreOpener-4c1dae718a04b6be4da87fc0e0181d8c-1 {}] regionserver.HStore(327): Store=4c1dae718a04b6be4da87fc0e0181d8c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:43:13,132 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,133 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,133 DEBUG 
[RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,134 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,134 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,136 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,139 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:43:13,139 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4c1dae718a04b6be4da87fc0e0181d8c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73563141, jitterRate=0.09617622196674347}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:43:13,139 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,140 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4c1dae718a04b6be4da87fc0e0181d8c: Running coprocessor pre-open hook at 1733096593128Writing region info on filesystem at 1733096593128Initializing all the Stores at 1733096593129 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733096593129Cleaning up temporary data from old regions at 1733096593134 (+5 ms)Running coprocessor post-open hooks at 1733096593139 (+5 ms)Region opened successfully at 1733096593140 (+1 ms) 2024-12-01T23:43:13,142 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c., pid=6, masterSystemTime=1733096593122 2024-12-01T23:43:13,145 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:13,145 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:13,147 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c1dae718a04b6be4da87fc0e0181d8c, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,42565,1733096591579 2024-12-01T23:43:13,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c1dae718a04b6be4da87fc0e0181d8c, server=579b0f681375,42565,1733096591579 because future has completed 2024-12-01T23:43:13,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T23:43:13,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4c1dae718a04b6be4da87fc0e0181d8c, server=579b0f681375,42565,1733096591579 in 184 msec 2024-12-01T23:43:13,161 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T23:43:13,161 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c1dae718a04b6be4da87fc0e0181d8c, ASSIGN in 348 msec 2024-12-01T23:43:13,165 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:43:13,166 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733096593165"}]},"ts":"1733096593165"} 2024-12-01T23:43:13,173 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-01T23:43:13,175 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:43:13,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 412 msec 2024-12-01T23:43:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T23:43:13,401 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T23:43:13,401 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-01T23:43:13,402 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:43:13,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-01T23:43:13,406 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:43:13,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-01T23:43:13,410 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c., hostname=579b0f681375,42565,1733096591579, seqNum=2] 2024-12-01T23:43:13,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-01T23:43:13,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-01T23:43:13,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:43:13,420 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-01T23:43:13,422 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T23:43:13,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T23:43:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:43:13,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42565 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-01T23:43:13,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:13,582 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4c1dae718a04b6be4da87fc0e0181d8c 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-01T23:43:13,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/.tmp/cf/db0d324e0e774de79c5c8bc773aff780 is 36, key is row/cf:cq/1733096593411/Put/seqid=0 2024-12-01T23:43:13,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741839_1015 (size=4787) 2024-12-01T23:43:13,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741839_1015 (size=4787) 2024-12-01T23:43:13,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741839_1015 (size=4787) 2024-12-01T23:43:13,608 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/.tmp/cf/db0d324e0e774de79c5c8bc773aff780 2024-12-01T23:43:13,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/.tmp/cf/db0d324e0e774de79c5c8bc773aff780 as hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/cf/db0d324e0e774de79c5c8bc773aff780 2024-12-01T23:43:13,625 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/cf/db0d324e0e774de79c5c8bc773aff780, entries=1, sequenceid=5, filesize=4.7 K 2024-12-01T23:43:13,627 INFO [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4c1dae718a04b6be4da87fc0e0181d8c in 45ms, sequenceid=5, compaction requested=false 2024-12-01T23:43:13,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4c1dae718a04b6be4da87fc0e0181d8c: 2024-12-01T23:43:13,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:13,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-01T23:43:13,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-01T23:43:13,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T23:43:13,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-01T23:43:13,637 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 219 msec 2024-12-01T23:43:13,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36755 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:43:13,741 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T23:43:13,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T23:43:13,748 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T23:43:13,749 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:13,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,749 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T23:43:13,750 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T23:43:13,750 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=112104429, stopped=false 2024-12-01T23:43:13,750 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=579b0f681375,36755,1733096591385 2024-12-01T23:43:13,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:13,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:13,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:13,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:13,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:13,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, 
quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:13,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:43:13,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:13,804 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T23:43:13,804 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T23:43:13,805 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:13,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:13,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:13,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:13,805 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:43:13,805 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,37655,1733096591537' ***** 2024-12-01T23:43:13,805 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:43:13,805 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,42565,1733096591579' ***** 2024-12-01T23:43:13,805 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:43:13,805 INFO [RS:0;579b0f681375:37655 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:43:13,805 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,34173,1733096591620' ***** 2024-12-01T23:43:13,806 INFO [RS:0;579b0f681375:37655 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T23:43:13,806 INFO [RS:0;579b0f681375:37655 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T23:43:13,806 INFO [RS:1;579b0f681375:42565 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:43:13,806 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,37655,1733096591537 2024-12-01T23:43:13,806 INFO [RS:0;579b0f681375:37655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:13,806 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:43:13,806 INFO [RS:0;579b0f681375:37655 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;579b0f681375:37655. 2024-12-01T23:43:13,806 INFO [RS:1;579b0f681375:42565 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T23:43:13,806 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:43:13,806 DEBUG [RS:0;579b0f681375:37655 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:13,806 INFO [RS:2;579b0f681375:34173 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:43:13,806 INFO [RS:1;579b0f681375:42565 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T23:43:13,806 DEBUG [RS:0;579b0f681375:37655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,807 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(3091): Received CLOSE for 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,807 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,37655,1733096591537; all regions closed. 2024-12-01T23:43:13,807 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:43:13,807 INFO [RS:2;579b0f681375:34173 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T23:43:13,807 INFO [RS:2;579b0f681375:34173 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T23:43:13,807 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,34173,1733096591620 2024-12-01T23:43:13,807 INFO [RS:2;579b0f681375:34173 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:13,807 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,42565,1733096591579 2024-12-01T23:43:13,807 INFO [RS:1;579b0f681375:42565 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:13,807 INFO [RS:2;579b0f681375:34173 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;579b0f681375:34173. 2024-12-01T23:43:13,807 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:43:13,807 INFO [RS:1;579b0f681375:42565 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;579b0f681375:42565. 2024-12-01T23:43:13,807 DEBUG [RS:2;579b0f681375:34173 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:13,807 DEBUG [RS:2;579b0f681375:34173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,807 DEBUG [RS:1;579b0f681375:42565 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:43:13,807 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,34173,1733096591620; all regions closed. 2024-12-01T23:43:13,808 DEBUG [RS:1;579b0f681375:42565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,808 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4c1dae718a04b6be4da87fc0e0181d8c, disabling compactions & flushes 2024-12-01T23:43:13,808 INFO [RS:1;579b0f681375:42565 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:43:13,808 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:13,808 INFO [RS:1;579b0f681375:42565 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:43:13,808 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:13,808 INFO [RS:1;579b0f681375:42565 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T23:43:13,808 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. after waiting 0 ms 2024-12-01T23:43:13,808 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T23:43:13,808 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,808 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:13,808 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,808 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,809 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,809 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,809 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,809 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,809 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-01T23:43:13,809 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1325): Online Regions={4c1dae718a04b6be4da87fc0e0181d8c=TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c., 1588230740=hbase:meta,,1.1588230740} 2024-12-01T23:43:13,809 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,809 DEBUG [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4c1dae718a04b6be4da87fc0e0181d8c 2024-12-01T23:43:13,809 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T23:43:13,809 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T23:43:13,810 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T23:43:13,810 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T23:43:13,810 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T23:43:13,810 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-01T23:43:13,810 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,811 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:13,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741835_1011 (size=93) 2024-12-01T23:43:13,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741835_1011 (size=93) 2024-12-01T23:43:13,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741835_1011 (size=93) 2024-12-01T23:43:13,816 DEBUG [RS:0;579b0f681375:37655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs 2024-12-01T23:43:13,816 INFO [RS:0;579b0f681375:37655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 579b0f681375%2C37655%2C1733096591537:(num 1733096592330) 2024-12-01T23:43:13,816 DEBUG [RS:0;579b0f681375:37655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:13,816 INFO [RS:0;579b0f681375:37655 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:13,816 INFO [RS:0;579b0f681375:37655 {}] hbase.HBaseServerBase(438): Shutdown 
chores and chore service 2024-12-01T23:43:13,816 INFO [RS:0;579b0f681375:37655 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:13,817 INFO [RS:0;579b0f681375:37655 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:43:13,817 INFO [RS:0;579b0f681375:37655 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:43:13,817 INFO [RS:0;579b0f681375:37655 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T23:43:13,817 INFO [RS:0;579b0f681375:37655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:13,817 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T23:43:13,817 INFO [RS:0;579b0f681375:37655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37655 2024-12-01T23:43:13,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741833_1009 (size=93) 2024-12-01T23:43:13,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741833_1009 (size=93) 2024-12-01T23:43:13,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741833_1009 (size=93) 2024-12-01T23:43:13,825 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/default/TestHBaseWalOnEC/4c1dae718a04b6be4da87fc0e0181d8c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T23:43:13,826 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 2024-12-01T23:43:13,826 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4c1dae718a04b6be4da87fc0e0181d8c: Waiting for close lock at 1733096593807Running coprocessor pre-close hooks at 1733096593807Disabling compacts and flushes for region at 1733096593807Disabling writes for close at 1733096593808 (+1 ms)Writing region close event to WAL at 1733096593817 (+9 ms)Running coprocessor post-close hooks at 1733096593826 (+9 ms)Closed at 1733096593826 2024-12-01T23:43:13,826 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c. 
2024-12-01T23:43:13,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,37655,1733096591537 2024-12-01T23:43:13,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:13,828 INFO [RS:0;579b0f681375:37655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:13,833 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/info/112489618e504f94bc80bdb61970d209 is 153, key is TestHBaseWalOnEC,,1733096592763.4c1dae718a04b6be4da87fc0e0181d8c./info:regioninfo/1733096593146/Put/seqid=0 2024-12-01T23:43:13,839 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,37655,1733096591537] 2024-12-01T23:43:13,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741840_1016 (size=6637) 2024-12-01T23:43:13,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741840_1016 (size=6637) 2024-12-01T23:43:13,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741840_1016 (size=6637) 2024-12-01T23:43:13,841 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/info/112489618e504f94bc80bdb61970d209 2024-12-01T23:43:13,847 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,37655,1733096591537 already deleted, retry=false 2024-12-01T23:43:13,847 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,37655,1733096591537 expired; onlineServers=2 2024-12-01T23:43:13,864 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/ns/a93557adaeff4a898508ce5c00902ae6 is 43, key is default/ns:d/1733096592704/Put/seqid=0 2024-12-01T23:43:13,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741841_1017 (size=5153) 2024-12-01T23:43:13,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741841_1017 (size=5153) 2024-12-01T23:43:13,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741841_1017 (size=5153) 2024-12-01T23:43:13,872 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/ns/a93557adaeff4a898508ce5c00902ae6 2024-12-01T23:43:13,884 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:13,888 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:13,888 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:13,895 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/table/c0e9751b6ac64c8b9ad2938136224ee7 is 52, key is TestHBaseWalOnEC/table:state/1733096593165/Put/seqid=0 2024-12-01T23:43:13,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741842_1018 (size=5249) 2024-12-01T23:43:13,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741842_1018 (size=5249) 2024-12-01T23:43:13,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741842_1018 (size=5249) 2024-12-01T23:43:13,903 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/table/c0e9751b6ac64c8b9ad2938136224ee7 2024-12-01T23:43:13,911 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/info/112489618e504f94bc80bdb61970d209 as hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/info/112489618e504f94bc80bdb61970d209 2024-12-01T23:43:13,919 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/info/112489618e504f94bc80bdb61970d209, entries=10, sequenceid=11, filesize=6.5 K 2024-12-01T23:43:13,920 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/ns/a93557adaeff4a898508ce5c00902ae6 as hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/ns/a93557adaeff4a898508ce5c00902ae6 2024-12-01T23:43:13,928 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/ns/a93557adaeff4a898508ce5c00902ae6, entries=2, sequenceid=11, filesize=5.0 K 2024-12-01T23:43:13,930 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/.tmp/table/c0e9751b6ac64c8b9ad2938136224ee7 as hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/table/c0e9751b6ac64c8b9ad2938136224ee7 2024-12-01T23:43:13,939 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/table/c0e9751b6ac64c8b9ad2938136224ee7, entries=2, sequenceid=11, filesize=5.1 K 2024-12-01T23:43:13,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:13,939 INFO [RS:0;579b0f681375:37655 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:13,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37655-0x1019490e2c90001, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:13,939 INFO [RS:0;579b0f681375:37655 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,37655,1733096591537; zookeeper connection closed. 2024-12-01T23:43:13,940 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6181ec26 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6181ec26 2024-12-01T23:43:13,941 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-12-01T23:43:13,947 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-01T23:43:13,948 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T23:43:13,948 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T23:43:13,948 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733096593809Running coprocessor pre-close hooks at 1733096593809Disabling compacts and flushes for region at 1733096593809Disabling writes for close at 1733096593810 (+1 ms)Obtaining lock to block concurrent updates at 1733096593810Preparing flush snapshotting stores in 1588230740 at 1733096593810Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733096593811 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733096593812 (+1 ms)Flushing 1588230740/info: creating writer at 1733096593812Flushing 1588230740/info: appending metadata at 1733096593833 (+21 ms)Flushing 1588230740/info: closing 
flushed file at 1733096593833Flushing 1588230740/ns: creating writer at 1733096593849 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733096593863 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733096593863Flushing 1588230740/table: creating writer at 1733096593880 (+17 ms)Flushing 1588230740/table: appending metadata at 1733096593894 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733096593894Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@305735cd: reopening flushed file at 1733096593910 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12038dfb: reopening flushed file at 1733096593919 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21a6d872: reopening flushed file at 1733096593928 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1733096593941 (+13 ms)Writing region close event to WAL at 1733096593942 (+1 ms)Running coprocessor post-close hooks at 1733096593948 (+6 ms)Closed at 1733096593948 2024-12-01T23:43:13,949 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T23:43:14,009 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,42565,1733096591579; all regions closed. 2024-12-01T23:43:14,010 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,010 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,010 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,011 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,011 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741836_1012 (size=2751) 2024-12-01T23:43:14,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741836_1012 (size=2751) 2024-12-01T23:43:14,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741836_1012 (size=2751) 2024-12-01T23:43:14,017 DEBUG [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs 2024-12-01T23:43:14,017 INFO [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 579b0f681375%2C42565%2C1733096591579.meta:.meta(num 1733096592631) 2024-12-01T23:43:14,018 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,018 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,018 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,018 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,018 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741834_1010 (size=1298) 2024-12-01T23:43:14,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741834_1010 (size=1298) 2024-12-01T23:43:14,021 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741834_1010 (size=1298) 2024-12-01T23:43:14,024 DEBUG [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs 2024-12-01T23:43:14,024 INFO [RS:1;579b0f681375:42565 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 579b0f681375%2C42565%2C1733096591579:(num 1733096592326) 2024-12-01T23:43:14,024 DEBUG [RS:1;579b0f681375:42565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:14,024 INFO [RS:1;579b0f681375:42565 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:14,024 INFO [RS:1;579b0f681375:42565 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:14,024 INFO [RS:1;579b0f681375:42565 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:14,024 INFO [RS:1;579b0f681375:42565 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:14,024 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T23:43:14,025 INFO [RS:1;579b0f681375:42565 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42565 2024-12-01T23:43:14,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,42565,1733096591579 2024-12-01T23:43:14,053 INFO [RS:1;579b0f681375:42565 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:14,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:14,054 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f029c8f5088@34c57c93 rejected from java.util.concurrent.ThreadPoolExecutor@71c7f46f[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-01T23:43:14,064 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,42565,1733096591579] 2024-12-01T23:43:14,072 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,42565,1733096591579 already deleted, retry=false 2024-12-01T23:43:14,072 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,42565,1733096591579 expired; onlineServers=1 2024-12-01T23:43:14,164 INFO [RS:1;579b0f681375:42565 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:14,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:14,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1019490e2c90002, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:14,165 INFO [RS:1;579b0f681375:42565 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,42565,1733096591579; zookeeper connection closed. 2024-12-01T23:43:14,165 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@429a8837 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@429a8837 2024-12-01T23:43:14,208 INFO [regionserver/579b0f681375:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T23:43:14,208 INFO [regionserver/579b0f681375:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T23:43:14,227 DEBUG [RS:2;579b0f681375:34173 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/oldWALs 2024-12-01T23:43:14,227 INFO [RS:2;579b0f681375:34173 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 579b0f681375%2C34173%2C1733096591620:(num 1733096592320) 2024-12-01T23:43:14,227 DEBUG [RS:2;579b0f681375:34173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:43:14,227 INFO [RS:2;579b0f681375:34173 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:43:14,227 INFO [RS:2;579b0f681375:34173 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:14,228 INFO [RS:2;579b0f681375:34173 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:14,228 INFO [RS:2;579b0f681375:34173 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:43:14,228 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T23:43:14,228 INFO [RS:2;579b0f681375:34173 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:43:14,228 INFO [RS:2;579b0f681375:34173 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T23:43:14,228 INFO [RS:2;579b0f681375:34173 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:14,229 INFO [RS:2;579b0f681375:34173 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34173 2024-12-01T23:43:14,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,34173,1733096591620 2024-12-01T23:43:14,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:43:14,262 INFO [RS:2;579b0f681375:34173 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:14,273 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,34173,1733096591620] 2024-12-01T23:43:14,281 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,34173,1733096591620 already deleted, retry=false 2024-12-01T23:43:14,281 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,34173,1733096591620 expired; onlineServers=0 2024-12-01T23:43:14,281 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '579b0f681375,36755,1733096591385' ***** 2024-12-01T23:43:14,281 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T23:43:14,281 INFO [M:0;579b0f681375:36755 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:43:14,281 INFO [M:0;579b0f681375:36755 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:43:14,281 DEBUG [M:0;579b0f681375:36755 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T23:43:14,282 DEBUG [M:0;579b0f681375:36755 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T23:43:14,282 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T23:43:14,282 DEBUG [master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733096591965 {}] cleaner.HFileCleaner(306): Exit Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733096591965,5,FailOnTimeoutGroup] 2024-12-01T23:43:14,282 DEBUG [master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733096591964 {}] cleaner.HFileCleaner(306): Exit Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733096591964,5,FailOnTimeoutGroup] 2024-12-01T23:43:14,282 INFO [M:0;579b0f681375:36755 {}] hbase.ChoreService(370): Chore service for: master/579b0f681375:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T23:43:14,282 INFO [M:0;579b0f681375:36755 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:43:14,282 DEBUG [M:0;579b0f681375:36755 {}] master.HMaster(1795): Stopping service threads 2024-12-01T23:43:14,282 INFO [M:0;579b0f681375:36755 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T23:43:14,283 INFO [M:0;579b0f681375:36755 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T23:43:14,283 INFO [M:0;579b0f681375:36755 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T23:43:14,283 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T23:43:14,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T23:43:14,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:43:14,289 DEBUG [M:0;579b0f681375:36755 {}] zookeeper.ZKUtil(347): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T23:43:14,289 WARN [M:0;579b0f681375:36755 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T23:43:14,290 INFO [M:0;579b0f681375:36755 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/.lastflushedseqids 2024-12-01T23:43:14,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741843_1019 (size=127) 2024-12-01T23:43:14,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741843_1019 (size=127) 2024-12-01T23:43:14,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741843_1019 (size=127) 2024-12-01T23:43:14,304 INFO [M:0;579b0f681375:36755 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T23:43:14,305 INFO [M:0;579b0f681375:36755 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T23:43:14,305 DEBUG 
[M:0;579b0f681375:36755 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T23:43:14,305 INFO [M:0;579b0f681375:36755 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:14,305 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:14,305 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T23:43:14,305 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:14,305 INFO [M:0;579b0f681375:36755 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-01T23:43:14,327 DEBUG [M:0;579b0f681375:36755 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69d6a9683110492f997568ce4e37b750 is 82, key is hbase:meta,,1/info:regioninfo/1733096592669/Put/seqid=0 2024-12-01T23:43:14,328 WARN [IPC Server handler 4 on default port 39367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-01T23:43:14,328 WARN [IPC Server handler 4 on default port 39367 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-01T23:43:14,328 WARN [IPC Server handler 4 on default port 39367 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-01T23:43:14,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741844_1020 (size=5672) 2024-12-01T23:43:14,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741844_1020 (size=5672) 2024-12-01T23:43:14,334 INFO [M:0;579b0f681375:36755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69d6a9683110492f997568ce4e37b750 2024-12-01T23:43:14,355 DEBUG [M:0;579b0f681375:36755 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0a13a360ca0416a9373489cd34312b7 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733096593178/Put/seqid=0 2024-12-01T23:43:14,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741845_1021 (size=6438) 2024-12-01T23:43:14,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741845_1021 (size=6438) 2024-12-01T23:43:14,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741845_1021 (size=6438) 2024-12-01T23:43:14,363 INFO [M:0;579b0f681375:36755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0a13a360ca0416a9373489cd34312b7 2024-12-01T23:43:14,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:14,373 INFO [RS:2;579b0f681375:34173 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:14,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34173-0x1019490e2c90003, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:14,373 INFO [RS:2;579b0f681375:34173 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,34173,1733096591620; zookeeper connection closed. 
2024-12-01T23:43:14,373 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3235f6d7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3235f6d7 2024-12-01T23:43:14,373 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T23:43:14,385 DEBUG [M:0;579b0f681375:36755 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0a4ab5d967da479abec5f1d9649e5288 is 69, key is 579b0f681375,34173,1733096591620/rs:state/1733096592114/Put/seqid=0 2024-12-01T23:43:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741846_1022 (size=5294) 2024-12-01T23:43:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741846_1022 (size=5294) 2024-12-01T23:43:14,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741846_1022 (size=5294) 2024-12-01T23:43:14,392 INFO [M:0;579b0f681375:36755 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0a4ab5d967da479abec5f1d9649e5288 2024-12-01T23:43:14,399 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69d6a9683110492f997568ce4e37b750 as hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/69d6a9683110492f997568ce4e37b750 2024-12-01T23:43:14,406 INFO [M:0;579b0f681375:36755 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/69d6a9683110492f997568ce4e37b750, entries=8, sequenceid=72, filesize=5.5 K 2024-12-01T23:43:14,407 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0a13a360ca0416a9373489cd34312b7 as hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0a13a360ca0416a9373489cd34312b7 2024-12-01T23:43:14,414 INFO [M:0;579b0f681375:36755 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0a13a360ca0416a9373489cd34312b7, entries=8, sequenceid=72, filesize=6.3 K 2024-12-01T23:43:14,415 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0a4ab5d967da479abec5f1d9649e5288 as 
hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0a4ab5d967da479abec5f1d9649e5288 2024-12-01T23:43:14,422 INFO [M:0;579b0f681375:36755 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39367/user/jenkins/test-data/41e219f0-2ba3-a617-aa94-6ac7f3e8f66a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0a4ab5d967da479abec5f1d9649e5288, entries=3, sequenceid=72, filesize=5.2 K 2024-12-01T23:43:14,423 INFO [M:0;579b0f681375:36755 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=72, compaction requested=false 2024-12-01T23:43:14,425 INFO [M:0;579b0f681375:36755 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:43:14,425 DEBUG [M:0;579b0f681375:36755 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733096594305Disabling compacts and flushes for region at 1733096594305Disabling writes for close at 1733096594305Obtaining lock to block concurrent updates at 1733096594305Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733096594305Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733096594306 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733096594307 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733096594308 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733096594327 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733096594327Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733096594340 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733096594355 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733096594355Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733096594369 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733096594384 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733096594384Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4405988c: reopening flushed file at 1733096594397 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@266e20: reopening flushed file at 1733096594406 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28d6328: reopening flushed file at 1733096594414 (+8 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 118ms, sequenceid=72, compaction requested=false at 1733096594423 (+9 ms)Writing region close event to WAL at 1733096594425 (+2 ms)Closed at 1733096594425 2024-12-01T23:43:14,425 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,425 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,425 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T23:43:14,428 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33147 is added to blk_1073741830_1006 (size=32665) 2024-12-01T23:43:14,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44777 is added to blk_1073741830_1006 (size=32665) 2024-12-01T23:43:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42661 is added to blk_1073741830_1006 (size=32665) 2024-12-01T23:43:14,430 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T23:43:14,430 INFO [M:0;579b0f681375:36755 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-01T23:43:14,430 INFO [M:0;579b0f681375:36755 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36755 2024-12-01T23:43:14,430 INFO [M:0;579b0f681375:36755 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:43:14,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:14,562 INFO [M:0;579b0f681375:36755 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:43:14,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36755-0x1019490e2c90000, quorum=127.0.0.1:55440, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:43:14,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@458a3cce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:14,570 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22d1a9e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:14,570 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:14,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70ba2e51{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:14,570 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19e751cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:14,572 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T23:43:14,572 WARN [BP-556674411-172.17.0.2-1733096589656 heartbeating to localhost/127.0.0.1:39367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T23:43:14,572 WARN [BP-556674411-172.17.0.2-1733096589656 heartbeating to localhost/127.0.0.1:39367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-556674411-172.17.0.2-1733096589656 (Datanode Uuid c7621785-d443-4001-bee3-5fdcc65005c7) service to localhost/127.0.0.1:39367 2024-12-01T23:43:14,572 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T23:43:14,573 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data5/current/BP-556674411-172.17.0.2-1733096589656 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:14,573 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data6/current/BP-556674411-172.17.0.2-1733096589656 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:14,574 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T23:43:14,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73a6352c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:14,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c1940b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:14,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:14,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60e07a4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:14,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@538424cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:14,579 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T23:43:14,579 WARN [BP-556674411-172.17.0.2-1733096589656 heartbeating to localhost/127.0.0.1:39367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T23:43:14,579 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T23:43:14,579 WARN [BP-556674411-172.17.0.2-1733096589656 heartbeating to localhost/127.0.0.1:39367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-556674411-172.17.0.2-1733096589656 (Datanode Uuid f1545059-cd32-427a-870d-49229ae4b309) service to localhost/127.0.0.1:39367 2024-12-01T23:43:14,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data3/current/BP-556674411-172.17.0.2-1733096589656 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:14,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data4/current/BP-556674411-172.17.0.2-1733096589656 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:14,580 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T23:43:14,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7adc0795{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:43:14,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3887b7b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:14,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:14,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@376a5039{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:14,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b7605af{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:14,583 WARN [BP-556674411-172.17.0.2-1733096589656 heartbeating to localhost/127.0.0.1:39367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T23:43:14,583 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T23:43:14,583 WARN [BP-556674411-172.17.0.2-1733096589656 heartbeating to localhost/127.0.0.1:39367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-556674411-172.17.0.2-1733096589656 (Datanode Uuid 7d4a2d10-d5a1-4bd8-b709-5deaf52cdecf) service to localhost/127.0.0.1:39367 2024-12-01T23:43:14,583 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T23:43:14,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data1/current/BP-556674411-172.17.0.2-1733096589656 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:14,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/cluster_3e8674b4-25aa-2871-35fc-617b8f3102a1/data/data2/current/BP-556674411-172.17.0.2-1733096589656 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T23:43:14,584 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T23:43:14,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@200f3fb2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T23:43:14,589 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38845bbf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T23:43:14,589 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T23:43:14,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f1fd8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T23:43:14,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f7a7804{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/eeb01964-1699-2a5c-3e24-9b3d132376e8/hadoop.log.dir/,STOPPED} 2024-12-01T23:43:14,595 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-01T23:43:14,618 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-01T23:43:14,624 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 88) - Thread LEAK? -, OpenFileDescriptor=520 (was 437) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=234 (was 237), ProcessCount=11 (was 11), AvailableMemoryMB=7084 (was 7263)