2024-11-25 05:43:39,860 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-25 05:43:39,878 main DEBUG Took 0.015672 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-25 05:43:39,879 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-25 05:43:39,880 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-25 05:43:39,881 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-25 05:43:39,883 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,898 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-25 05:43:39,920 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,921 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,922 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,923 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,923 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,924 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,925 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,926 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,927 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,928 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,928 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,929 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,930 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,931 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,931 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,932 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,932 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,933 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,933 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,934 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,934 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,935 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,935 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-25 05:43:39,936 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,936 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-25 05:43:39,938 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-25 05:43:39,940 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-25 05:43:39,942 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-25 05:43:39,943 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-25 05:43:39,944 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-25 05:43:39,945 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-25 05:43:39,956 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-25 05:43:39,960 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-25 05:43:39,963 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-25 05:43:39,963 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-25 05:43:39,964 main DEBUG createAppenders(={Console})
2024-11-25 05:43:39,965 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-11-25 05:43:39,965 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-25 05:43:39,966 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-11-25 05:43:39,967 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-25 05:43:39,967 main DEBUG OutputStream closed
2024-11-25 05:43:39,968 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-25 05:43:39,968 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-25 05:43:39,968 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-11-25 05:43:40,062 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-25 05:43:40,065 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-25 05:43:40,066 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-25 05:43:40,067 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-25 05:43:40,068 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-25 05:43:40,069 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-25 05:43:40,069 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-25 05:43:40,070 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-25 05:43:40,070 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-25 05:43:40,071 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-25 05:43:40,071 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-25 05:43:40,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-25 05:43:40,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-25 05:43:40,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-25 05:43:40,072 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-25 05:43:40,073 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-25 05:43:40,073 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-25 05:43:40,074 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-25 05:43:40,077 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-25 05:43:40,077 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-25 05:43:40,078 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-25 05:43:40,079 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-25T05:43:40,100 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-25 05:43:40,103 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-25 05:43:40,104 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
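The configuration assembled above is loaded from the log4j2.properties bundled in hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar (see the "Reconfiguration complete" record). A minimal sketch of what that file plausibly contains, reconstructed only from the LoggerConfig, PatternLayout, and HBaseTestAppender builders logged above; the property keys follow standard Log4j 2 properties syntax and the appender type name is inferred from its plugin class, so treat the exact spelling as an assumption:

    # Sketch reconstructed from the builders above; keys are assumptions, values come from the log.
    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n
    rootLogger = INFO,Console
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false
    # Remaining per-class levels from the createLoggers(...) list above follow the same pattern:
    # MBeans=ERROR, TestJul2Slf4j=DEBUG, MetricsSinkAdapter=WARN, MetricsSystemImpl=ERROR,
    # FailedServers=DEBUG, MetricsConfig=WARN, ScheduledChore=INFO, RSRpcServices=DEBUG,
    # org.apache.hbase.thirdparty.io.netty.channel=DEBUG.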
2024-11-25T05:43:40,384 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46
2024-11-25T05:43:40,414 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5, deleteOnExit=true
2024-11-25T05:43:40,415 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/test.cache.data in system properties and HBase conf
2024-11-25T05:43:40,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.tmp.dir in system properties and HBase conf
2024-11-25T05:43:40,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir in system properties and HBase conf
2024-11-25T05:43:40,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-25T05:43:40,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-25T05:43:40,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-25T05:43:40,526 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-25T05:43:40,658 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-25T05:43:40,663 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-25T05:43:40,664 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-25T05:43:40,664 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-25T05:43:40,665 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-25T05:43:40,666 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-25T05:43:40,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-25T05:43:40,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-25T05:43:40,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-25T05:43:40,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-25T05:43:40,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/nfs.dump.dir in system properties and HBase conf
2024-11-25T05:43:40,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/java.io.tmpdir in system properties and HBase conf
2024-11-25T05:43:40,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-25T05:43:40,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-25T05:43:40,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-25T05:43:41,563 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-25T05:43:41,664 INFO [Time-limited test {}] log.Log(170): Logging initialized @2604ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-25T05:43:41,744 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-25T05:43:41,813 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-25T05:43:41,837 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-25T05:43:41,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-25T05:43:41,839 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
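The MetricsConfig warning above (Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties) is benign for this test run: no metrics sink is configured, so Hadoop metrics are collected but not emitted anywhere. If a sink were wanted, a minimal hadoop-metrics2.properties on the test classpath would satisfy the lookup; a sketch using the stock Hadoop FileSink, with an illustrative output filename:

    # Minimal sketch; the sink class is the standard org.apache.hadoop.metrics2.sink.FileSink,
    # the output filename is made up for illustration.
    *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
    namenode.sink.file.filename=namenode-metrics.out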
2024-11-25T05:43:41,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-25T05:43:41,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e18bd18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,AVAILABLE}
2024-11-25T05:43:41,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2faf2775{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-25T05:43:42,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e22261{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/java.io.tmpdir/jetty-localhost-44455-hadoop-hdfs-3_4_1-tests_jar-_-any-12035480909363190670/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-25T05:43:42,101 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3599471c{HTTP/1.1, (http/1.1)}{localhost:44455}
2024-11-25T05:43:42,101 INFO [Time-limited test {}] server.Server(415): Started @3042ms
2024-11-25T05:43:42,543 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-25T05:43:42,559 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-25T05:43:42,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-25T05:43:42,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-25T05:43:42,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-25T05:43:42,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7728820b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,AVAILABLE}
2024-11-25T05:43:42,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a906869{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-25T05:43:42,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6bf2c732{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/java.io.tmpdir/jetty-localhost-42813-hadoop-hdfs-3_4_1-tests_jar-_-any-10526739234476627962/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T05:43:42,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1182e874{HTTP/1.1, (http/1.1)}{localhost:42813}
2024-11-25T05:43:42,727 INFO [Time-limited test {}] server.Server(415): Started @3668ms
2024-11-25T05:43:42,814 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-25T05:43:43,038 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-25T05:43:43,048 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-25T05:43:43,058 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-25T05:43:43,058 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-25T05:43:43,059 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-25T05:43:43,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23e84c60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,AVAILABLE}
2024-11-25T05:43:43,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28ffdd72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-25T05:43:43,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41033a80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/java.io.tmpdir/jetty-localhost-39695-hadoop-hdfs-3_4_1-tests_jar-_-any-1435132364125881346/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T05:43:43,214 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14721f03{HTTP/1.1, (http/1.1)}{localhost:39695}
2024-11-25T05:43:43,214 INFO [Time-limited test {}] server.Server(415): Started @4155ms
2024-11-25T05:43:43,217 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-25T05:43:43,304 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-25T05:43:43,313 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-25T05:43:43,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-25T05:43:43,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-25T05:43:43,331 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-25T05:43:43,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@435daa1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,AVAILABLE}
2024-11-25T05:43:43,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41dce2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-25T05:43:43,369 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data3/current/BP-552640952-172.17.0.2-1732513421321/current, will proceed with Du for space computation calculation,
2024-11-25T05:43:43,369 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data2/current/BP-552640952-172.17.0.2-1732513421321/current, will proceed with Du for space computation calculation,
2024-11-25T05:43:43,369 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data1/current/BP-552640952-172.17.0.2-1732513421321/current, will proceed with Du for space computation calculation,
2024-11-25T05:43:43,369 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data4/current/BP-552640952-172.17.0.2-1732513421321/current, will proceed with Du for space computation calculation,
2024-11-25T05:43:43,434 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-25T05:43:43,445 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-25T05:43:43,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67fa62aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/java.io.tmpdir/jetty-localhost-37103-hadoop-hdfs-3_4_1-tests_jar-_-any-3976983967542641972/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T05:43:43,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3e9f3a79{HTTP/1.1, (http/1.1)}{localhost:37103}
2024-11-25T05:43:43,460 INFO [Time-limited test {}] server.Server(415): Started @4400ms
2024-11-25T05:43:43,463 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-25T05:43:43,524 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdabf5c52b9317139 with lease ID 0x338e68fad4c0fce4: Processing first storage report for DS-d2421e3a-d939-49c2-b743-b798a69d4824 from datanode DatanodeRegistration(127.0.0.1:45471, datanodeUuid=6d73bc6f-b45e-4f9c-b1a3-bff50e110552, infoPort=35163, infoSecurePort=0, ipcPort=39181, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321)
2024-11-25T05:43:43,526 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdabf5c52b9317139 with lease ID 0x338e68fad4c0fce4: from storage DS-d2421e3a-d939-49c2-b743-b798a69d4824 node DatanodeRegistration(127.0.0.1:45471, datanodeUuid=6d73bc6f-b45e-4f9c-b1a3-bff50e110552, infoPort=35163, infoSecurePort=0, ipcPort=39181, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-25T05:43:43,527 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x496f177bc94e2722 with lease ID 0x338e68fad4c0fce5: Processing first storage report for DS-17deab0b-8df1-443f-8ae2-936bbecd6e47 from datanode DatanodeRegistration(127.0.0.1:45395, datanodeUuid=77c34a3a-8275-40ed-9179-217c23bddd81, infoPort=37635, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321)
2024-11-25T05:43:43,527 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x496f177bc94e2722 with lease ID 0x338e68fad4c0fce5: from storage DS-17deab0b-8df1-443f-8ae2-936bbecd6e47 node DatanodeRegistration(127.0.0.1:45395, datanodeUuid=77c34a3a-8275-40ed-9179-217c23bddd81, infoPort=37635, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-25T05:43:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdabf5c52b9317139 with lease ID 0x338e68fad4c0fce4: Processing first storage report for DS-e68e8e14-1320-4295-90bd-ae84ad169023 from datanode DatanodeRegistration(127.0.0.1:45471, datanodeUuid=6d73bc6f-b45e-4f9c-b1a3-bff50e110552, infoPort=35163, infoSecurePort=0, ipcPort=39181, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321)
2024-11-25T05:43:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdabf5c52b9317139 with lease ID 0x338e68fad4c0fce4: from storage DS-e68e8e14-1320-4295-90bd-ae84ad169023 node DatanodeRegistration(127.0.0.1:45471, datanodeUuid=6d73bc6f-b45e-4f9c-b1a3-bff50e110552, infoPort=35163, infoSecurePort=0, ipcPort=39181, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-25T05:43:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x496f177bc94e2722 with lease ID 0x338e68fad4c0fce5: Processing first storage report for DS-335ce62c-352b-4f32-91e9-318caa7a9c00 from datanode DatanodeRegistration(127.0.0.1:45395, datanodeUuid=77c34a3a-8275-40ed-9179-217c23bddd81, infoPort=37635, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321)
2024-11-25T05:43:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x496f177bc94e2722 with lease ID 0x338e68fad4c0fce5: from storage DS-335ce62c-352b-4f32-91e9-318caa7a9c00 node DatanodeRegistration(127.0.0.1:45395, datanodeUuid=77c34a3a-8275-40ed-9179-217c23bddd81, infoPort=37635, infoSecurePort=0, ipcPort=37857, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-25T05:43:43,579 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data5/current/BP-552640952-172.17.0.2-1732513421321/current, will proceed with Du for space computation calculation,
2024-11-25T05:43:43,580 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data6/current/BP-552640952-172.17.0.2-1732513421321/current, will proceed with Du for space computation calculation,
2024-11-25T05:43:43,630 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-25T05:43:43,636 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78e0fd97b213d112 with lease ID 0x338e68fad4c0fce6: Processing first storage report for DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9 from datanode DatanodeRegistration(127.0.0.1:46773, datanodeUuid=42412a7d-66f5-4914-bfcf-062eb08ff6f4, infoPort=36019, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321)
2024-11-25T05:43:43,637 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78e0fd97b213d112 with lease ID 0x338e68fad4c0fce6: from storage DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9 node DatanodeRegistration(127.0.0.1:46773, datanodeUuid=42412a7d-66f5-4914-bfcf-062eb08ff6f4, infoPort=36019, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-25T05:43:43,637 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x78e0fd97b213d112 with lease ID 0x338e68fad4c0fce6: Processing first storage report for DS-da1da2bf-7b6b-42b0-aa6c-70f1eccafa8e from datanode DatanodeRegistration(127.0.0.1:46773, datanodeUuid=42412a7d-66f5-4914-bfcf-062eb08ff6f4, infoPort=36019, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321)
2024-11-25T05:43:43,638 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x78e0fd97b213d112 with lease ID 0x338e68fad4c0fce6: from storage DS-da1da2bf-7b6b-42b0-aa6c-70f1eccafa8e node DatanodeRegistration(127.0.0.1:46773, datanodeUuid=42412a7d-66f5-4914-bfcf-062eb08ff6f4, infoPort=36019, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1804758513;c=1732513421321), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-25T05:43:43,917 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46
2024-11-25T05:43:44,006 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-25T05:43:44,092 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=407, ProcessCount=11, AvailableMemoryMB=8070
2024-11-25T05:43:44,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-25T05:43:44,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-25T05:43:44,232 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/zookeeper_0, clientPort=58462, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-25T05:43:44,245 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58462
2024-11-25T05:43:44,261 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:44,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:44,357 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-25T05:43:44,357 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
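The StartMiniClusterOption record above (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1) is the cluster shape the test asks HBaseTestingUtil for; DFS is skipped ("NOT STARTING DFS") because the test already started its own HDFS earlier in the log. A minimal sketch of the generic HBaseTestingUtil usage implied by that record, not the actual body of TestHBaseWalOnEC, with builder method names assumed to mirror the fields printed above:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Cluster shape copied from the StartMiniClusterOption printed in the log above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)   // ignored here since DFS is already up ("NOT STARTING DFS")
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);
        try {
          // ... test body would go here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }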
2024-11-25T05:43:44,411 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:53414 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:46773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53414 dst: /127.0.0.1:46773
java.io.IOException: Premature EOF from inputStream
  at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T05:43:44,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-25T05:43:44,851 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-25T05:43:44,868 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10 with version=8
2024-11-25T05:43:44,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/hbase-staging
2024-11-25T05:43:44,978 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-25T05:43:45,269 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8ef925b832e3:0 server-side Connection retries=45
2024-11-25T05:43:45,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:45,285 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:45,293 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-25T05:43:45,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:45,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-25T05:43:45,493 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
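The two "Cannot allocate parity block" warnings and the "Block group <1> failed to write 2 blocks" message above are consistent with writing RS-3-2-1024k striped files on a three-datanode cluster: that policy wants 3 data plus 2 parity blocks, i.e. 5 datanodes for full placement, which is exactly the mismatch the quoted 'hdfs ec -verifyClusterSetup' check would report. A hedged sketch of how a client can inspect the policy in effect on a path through the public DistributedFileSystem API (the fs and path arguments are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public final class EcPolicyCheck {
      // Prints the erasure coding policy in effect for a directory, or notes plain replication.
      static void printPolicy(DistributedFileSystem fs, Path dir) throws IOException {
        ErasureCodingPolicy policy = fs.getErasureCodingPolicy(dir);
        if (policy == null) {
          System.out.println(dir + " uses plain replication");
        } else {
          // For RS-3-2-1024k this prints 3 data + 2 parity.
          System.out.println(dir + " uses " + policy.getName() + " ("
              + policy.getNumDataUnits() + " data + " + policy.getNumParityUnits() + " parity)");
        }
      }
    }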
2024-11-25T05:43:45,564 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-25T05:43:45,576 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-25T05:43:45,581 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-25T05:43:45,616 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 40753 (auto-detected)
2024-11-25T05:43:45,618 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-25T05:43:45,645 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46037
2024-11-25T05:43:45,678 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46037 connecting to ZooKeeper ensemble=127.0.0.1:58462
2024-11-25T05:43:45,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:460370x0, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-25T05:43:45,732 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46037-0x10075683fbb0000 connected
2024-11-25T05:43:45,789 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:45,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:45,812 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T05:43:45,818 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10, hbase.cluster.distributed=false
2024-11-25T05:43:45,851 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-25T05:43:45,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46037
2024-11-25T05:43:45,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46037
2024-11-25T05:43:45,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46037
2024-11-25T05:43:45,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46037
2024-11-25T05:43:45,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46037
2024-11-25T05:43:45,995 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8ef925b832e3:0 server-side Connection retries=45
2024-11-25T05:43:45,997 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
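Between the HMaster record above (hbase.rootdir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10, hbase.cluster.distributed=false) and the ZooKeeper ensemble at 127.0.0.1:58462, the log pins down how a client would reach this mini cluster. A minimal sketch of the equivalent client-side Configuration; the property keys are the standard HBase ones, and in the real test HBaseTestingUtil already carries these values in its own configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class ClientConfSketch {
      static Configuration clientConf() {
        Configuration conf = HBaseConfiguration.create();
        // Values copied from the master and ZooKeeper records above.
        conf.set("hbase.rootdir",
            "hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10");
        conf.setBoolean("hbase.cluster.distributed", false);
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 58462);
        return conf;
      }
    }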
2024-11-25T05:43:45,998 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:45,998 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-25T05:43:45,998 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:45,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-25T05:43:46,002 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-25T05:43:46,006 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-25T05:43:46,007 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37013
2024-11-25T05:43:46,010 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37013 connecting to ZooKeeper ensemble=127.0.0.1:58462
2024-11-25T05:43:46,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:46,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:46,027 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370130x0, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-25T05:43:46,028 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37013-0x10075683fbb0001 connected
2024-11-25T05:43:46,029 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T05:43:46,034 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-25T05:43:46,043 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-25T05:43:46,046 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-25T05:43:46,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-25T05:43:46,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37013
2024-11-25T05:43:46,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37013
2024-11-25T05:43:46,061 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37013
2024-11-25T05:43:46,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37013
2024-11-25T05:43:46,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37013
2024-11-25T05:43:46,089 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8ef925b832e3:0 server-side Connection retries=45
2024-11-25T05:43:46,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:46,090 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:46,091 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-25T05:43:46,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:46,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-25T05:43:46,091 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-25T05:43:46,091 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-25T05:43:46,092 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40635
2024-11-25T05:43:46,095 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40635 connecting to ZooKeeper ensemble=127.0.0.1:58462
2024-11-25T05:43:46,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:46,099 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:46,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406350x0, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-25T05:43:46,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406350x0, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T05:43:46,107 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-25T05:43:46,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40635-0x10075683fbb0002 connected
2024-11-25T05:43:46,110 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-25T05:43:46,112 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-25T05:43:46,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-25T05:43:46,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40635
2024-11-25T05:43:46,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40635
2024-11-25T05:43:46,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40635
2024-11-25T05:43:46,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40635
2024-11-25T05:43:46,117 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40635
2024-11-25T05:43:46,137 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8ef925b832e3:0 server-side Connection retries=45
2024-11-25T05:43:46,137 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:46,137 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:46,138 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-25T05:43:46,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-25T05:43:46,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-25T05:43:46,138 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-25T05:43:46,139 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-25T05:43:46,140 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36001
2024-11-25T05:43:46,142 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36001 connecting to ZooKeeper ensemble=127.0.0.1:58462
2024-11-25T05:43:46,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:46,148 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-25T05:43:46,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360010x0, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-25T05:43:46,156 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360010x0, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T05:43:46,156 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-25T05:43:46,157 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36001-0x10075683fbb0003 connected
2024-11-25T05:43:46,158 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-25T05:43:46,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-25T05:43:46,162 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-25T05:43:46,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36001
2024-11-25T05:43:46,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36001
2024-11-25T05:43:46,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36001
2024-11-25T05:43:46,179 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36001
2024-11-25T05:43:46,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36001
2024-11-25T05:43:46,200 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8ef925b832e3:46037
2024-11-25T05:43:46,201 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8ef925b832e3,46037,1732513425040
2024-11-25T05:43:46,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-25T05:43:46,207 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-25T05:43:46,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-25T05:43:46,208
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:46,210 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8ef925b832e3,46037,1732513425040 2024-11-25T05:43:46,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:46,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:46,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,236 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:46,236 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,237 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T05:43:46,238 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8ef925b832e3,46037,1732513425040 from backup master directory 2024-11-25T05:43:46,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8ef925b832e3,46037,1732513425040 2024-11-25T05:43:46,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:46,241 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
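[Note] The RpcExecutor and RWQueueRpcExecutor lines above repeat for every server in the mini-cluster: each one starts a default.FPBQ.Fifo executor, a priority.RWQ.Fifo executor split into write and read handlers, plus replication and metaPriority executors, and the handlerCount / numCallQueues values they report are driven by the IPC settings the test harness passes in. A minimal sketch of pinning such values in a test Configuration, assuming the standard HBase property names hbase.regionserver.handler.count and hbase.ipc.server.callqueue.read.ratio (both are assumptions here, not read from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class IpcQueueConfigSketch {
  // Returns a Configuration that keeps the RPC executors small, loosely
  // mirroring the handlerCount=3 / readHandlers=2 numbers logged above.
  public static Configuration smallIpcConfig() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property name: handler threads per regionserver RPC server.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Assumed property name: fraction of handlers dedicated to reads, which is
    // the split RWQueueRpcExecutor reports as readHandlers/writeHandlers above.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    return conf;
  }
}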
2024-11-25T05:43:46,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:46,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:46,242 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T05:43:46,243 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8ef925b832e3,46037,1732513425040 2024-11-25T05:43:46,245 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-25T05:43:46,247 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-25T05:43:46,321 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/hbase.id] with ID: 6dabbbad-1243-4207-95ed-74e7a3b6de28 2024-11-25T05:43:46,321 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/.tmp/hbase.id 2024-11-25T05:43:46,329 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,330 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:44870 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44870 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-25T05:43:46,342 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:46,342 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/.tmp/hbase.id]:[hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/hbase.id] 2024-11-25T05:43:46,401 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:46,407 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T05:43:46,432 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 23ms. 2024-11-25T05:43:46,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,445 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:46,459 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,459 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:53438 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:46773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53438 dst: /127.0.0.1:46773 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-25T05:43:46,472 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
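[Note] The pattern just above (two "Cannot allocate parity block" warnings, a DataXceiver "Premature EOF" error, then "Block group <1> failed to write 2 blocks") is the signature of writing striped files under the RS-3-2-1024k erasure-coding policy on a cluster that is too small for it: that policy needs 3 data plus 2 parity targets, i.e. at least 5 datanodes, while this mini-cluster has only 3 running, so the parity cells at indices 3 and 4 can never be placed. The warnings themselves point at 'hdfs ec -verifyClusterSetup' as the check. Below is a minimal sketch, assuming the test-data directory has the policy set on it and that falling back to plain replication is acceptable for the test, of inspecting and clearing the policy through the HDFS client API; the path is a hypothetical placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical directory; substitute the test-data root the warnings refer to.
    Path dir = new Path("/user/jenkins/test-data");
    try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      if (policy != null) {
        System.out.println("EC policy on " + dir + ": " + policy.getName());
        // RS-3-2-1024k cannot place its 2 parity blocks on a 3-datanode cluster;
        // unsetting the policy makes new files under dir use plain replication.
        dfs.unsetErasureCodingPolicy(dir);
      }
    }
  }
}

Only files created after such a change pick up the new layout; blocks already written as striped groups keep whatever was stored.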
2024-11-25T05:43:46,489 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T05:43:46,495 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T05:43:46,502 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T05:43:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-25T05:43:46,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-25T05:43:46,534 WARN [IPC Server handler 3 on default port 44447 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 
2024-11-25T05:43:46,534 WARN [IPC Server handler 3 on default port 44447 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T05:43:46,534 WARN [IPC Server handler 3 on default port 44447 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T05:43:46,534 WARN [IPC Server handler 3 on default port 44447 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T05:43:46,539 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy47.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,550 WARN [IPC Server handler 4 on default port 44447 {}] blockmanagement.BlockPlacementPolicyRackFaultTolerant(145): Only able to place 2 of total expected 3 (maxNodesPerRack=3, numOfReplicas=3) nodes evenly across racks, falling back to evenly place on the remaining racks. This may not guarantee rack-level fault tolerance. Please check if the racks are configured properly. 2024-11-25T05:43:46,551 WARN [IPC Server handler 4 on default port 44447 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T05:43:46,551 WARN [IPC Server handler 4 on default port 44447 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T05:43:46,551 WARN [IPC Server handler 4 on default port 44447 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T05:43:46,552 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(635): Failed write hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189; retrying up to 10 times org.apache.hadoop.ipc.RemoteException: File /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 could only be written to 2 of the 3 required nodes for RS-3-2-1024k. There are 3 datanode(s) running and 3 node(s) are excluded in this operation. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2480) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:293) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3075) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:932) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:603) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$addBlock$11(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:500) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy47.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy48.addBlock(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSOutputStream.addBlock(DFSOutputStream.java:1143) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.allocateNewBlock(DFSStripedOutputStream.java:508) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.writeChunk(DFSStripedOutputStream.java:561) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:220) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:165) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:146) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSStripedOutputStream.closeImpl(DFSStripedOutputStream.java:1234) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:861) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:77) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.util.FSTableDescriptors.writeTableDescriptor(FSTableDescriptors.java:631) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptorForTableDirectory(FSTableDescriptors.java:707) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.master.region.MasterRegion.bootstrap(MasterRegion.java:241) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:410) ~[classes/:?] 
at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,563 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,563 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,576 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:44920 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44920 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-25T05:43:46,582 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
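[Note] To read the stretch above: the first two attempts to write the master:store table descriptor fail while closing the striped output stream, with the NameNode reporting the file "could only be written to 2 of the 3 required nodes for RS-3-2-1024k" and all 3 datanodes excluded; FSTableDescriptors catches each exception and retries under the next sequence id, "retrying up to 10 times". The write at 05:43:46,56x appears to go through (its 1189-byte block is reported stored and no further retry is logged), and the two earlier partial files are deleted just below. A minimal, generic sketch of that bounded-retry shape follows; it is not the actual FSTableDescriptors code, and the WriteAttempt type and attempt limit are illustrative only.

import java.io.IOException;

public final class BoundedRetrySketch {
  /** Illustrative stand-in for one attempt at writing a descriptor file. */
  interface WriteAttempt {
    void run(int sequenceId) throws IOException;
  }

  /** Retry a write under increasing sequence ids, as the log above describes. */
  static void writeWithRetries(WriteAttempt attempt, int maxAttempts) throws IOException {
    IOException last = null;
    for (int i = 1; i <= maxAttempts; i++) {
      try {
        attempt.run(i);   // e.g. .tableinfo.0000000001, .tableinfo.0000000002, ...
        return;           // success: stop retrying
      } catch (IOException e) {
        last = e;         // remember the failure and try the next sequence id
      }
    }
    throw last;           // all attempts failed
  }
}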
2024-11-25T05:43:46,585 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.tabledesc/.tableinfo.0000000001.1189 2024-11-25T05:43:46,588 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(591): Deleted hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.tabledesc/.tableinfo.0000000002.1189 2024-11-25T05:43:46,607 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store 2024-11-25T05:43:46,633 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,633 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:46,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:44936 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44936 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-25T05:43:46,642 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:46,647 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-25T05:43:46,650 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:46,651 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T05:43:46,652 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:46,652 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:46,653 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T05:43:46,653 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:46,654 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
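[Note] Two lines above are worth flagging for anyone reusing this setup. StoreHotnessProtector(112) reports itself disabled and names its switch directly: hbase.region.store.parallel.put.limit must be greater than 0 to enable it. And the create-then-close of master:store here, followed by the re-open of the same region further down, is the local-region bootstrap sequence rather than a failure. A minimal sketch of enabling the protector in a test Configuration; the property name is quoted from the log line above, while the value 10 is an arbitrary illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HotnessProtectorConfigSketch {
  public static Configuration withHotnessProtector() {
    Configuration conf = HBaseConfiguration.create();
    // Property name taken from the StoreHotnessProtector log line above;
    // any value > 0 enables the protector, 10 is just an example.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    return conf;
  }
}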
2024-11-25T05:43:46,655 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732513426651Disabling compacts and flushes for region at 1732513426651Disabling writes for close at 1732513426653 (+2 ms)Writing region close event to WAL at 1732513426654 (+1 ms)Closed at 1732513426654 2024-11-25T05:43:46,657 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/.initializing 2024-11-25T05:43:46,658 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/WALs/8ef925b832e3,46037,1732513425040 2024-11-25T05:43:46,679 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-25T05:43:46,699 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C46037%2C1732513425040, suffix=, logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/WALs/8ef925b832e3,46037,1732513425040, archiveDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/oldWALs, maxLogs=10 2024-11-25T05:43:46,736 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/WALs/8ef925b832e3,46037,1732513425040/8ef925b832e3%2C46037%2C1732513425040.1732513426705, exclude list is [], retry=0 2024-11-25T05:43:46,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:46,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45395,DS-17deab0b-8df1-443f-8ae2-936bbecd6e47,DISK] 2024-11-25T05:43:46,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46773,DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9,DISK] 2024-11-25T05:43:46,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45471,DS-d2421e3a-d939-49c2-b743-b798a69d4824,DISK] 2024-11-25T05:43:46,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-25T05:43:46,807 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/WALs/8ef925b832e3,46037,1732513425040/8ef925b832e3%2C46037%2C1732513425040.1732513426705 2024-11-25T05:43:46,808 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35163:35163),(127.0.0.1/127.0.0.1:37635:37635),(127.0.0.1/127.0.0.1:36019:36019)] 2024-11-25T05:43:46,809 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T05:43:46,809 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:46,814 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,815 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T05:43:46,901 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:46,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:46,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T05:43:46,908 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:46,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:46,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T05:43:46,914 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:46,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:46,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T05:43:46,918 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:46,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:46,920 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,923 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,925 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,932 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,932 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,936 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T05:43:46,940 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:46,947 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T05:43:46,949 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68930599, jitterRate=0.02714596688747406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T05:43:46,957 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732513426829Initializing all the Stores at 1732513426832 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513426833 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513426833Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513426834 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513426834Cleaning up temporary data from old regions at 1732513426932 (+98 ms)Region opened successfully at 1732513426957 (+25 ms) 2024-11-25T05:43:46,958 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T05:43:46,999 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@651ce9f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:47,039 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T05:43:47,057 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T05:43:47,057 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T05:43:47,061 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T05:43:47,063 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-25T05:43:47,069 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-25T05:43:47,069 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T05:43:47,099 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T05:43:47,108 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T05:43:47,111 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T05:43:47,114 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T05:43:47,116 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T05:43:47,117 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T05:43:47,120 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T05:43:47,124 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T05:43:47,125 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T05:43:47,127 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T05:43:47,128 DEBUG [master/8ef925b832e3:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T05:43:47,148 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T05:43:47,149 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T05:43:47,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:47,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:47,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,155 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:47,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,155 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:47,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,160 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8ef925b832e3,46037,1732513425040, sessionid=0x10075683fbb0000, setting cluster-up flag (Was=false) 2024-11-25T05:43:47,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,176 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-25T05:43:47,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,183 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T05:43:47,186 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8ef925b832e3,46037,1732513425040 2024-11-25T05:43:47,191 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:47,197 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T05:43:47,199 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8ef925b832e3,46037,1732513425040 2024-11-25T05:43:47,205 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T05:43:47,285 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(746): ClusterId : 6dabbbad-1243-4207-95ed-74e7a3b6de28 2024-11-25T05:43:47,287 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(746): ClusterId : 6dabbbad-1243-4207-95ed-74e7a3b6de28 2024-11-25T05:43:47,287 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(746): ClusterId : 6dabbbad-1243-4207-95ed-74e7a3b6de28 2024-11-25T05:43:47,288 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T05:43:47,288 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T05:43:47,288 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T05:43:47,291 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:47,304 INFO 
[master/8ef925b832e3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T05:43:47,305 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T05:43:47,305 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T05:43:47,307 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T05:43:47,307 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T05:43:47,307 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T05:43:47,307 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T05:43:47,309 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T05:43:47,310 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T05:43:47,311 DEBUG [RS:0;8ef925b832e3:37013 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c9e7a64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:47,313 DEBUG [RS:1;8ef925b832e3:40635 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2827f3f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:47,313 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T05:43:47,314 DEBUG [RS:2;8ef925b832e3:36001 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7235c2c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:47,314 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
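The StochasticLoadBalancer entry above reports the loaded balancer parameters (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, isByTable=false). A minimal sketch of supplying such values programmatically, assuming the hbase.master.balancer.stochastic.* key names (the keys are an assumption, not confirmed by this log; only the values mirror the line above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the logged balancer config; key names are assumed.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }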
2024-11-25T05:43:47,322 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8ef925b832e3,46037,1732513425040 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T05:43:47,332 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:47,332 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:47,333 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:47,333 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:47,334 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8ef925b832e3:0, corePoolSize=10, maxPoolSize=10 2024-11-25T05:43:47,334 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,334 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:47,334 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,335 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8ef925b832e3:37013 2024-11-25T05:43:47,339 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T05:43:47,339 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T05:43:47,340 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T05:43:47,341 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;8ef925b832e3:36001 2024-11-25T05:43:47,341 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;8ef925b832e3:40635 2024-11-25T05:43:47,342 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T05:43:47,342 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T05:43:47,342 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T05:43:47,343 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T05:43:47,343 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,46037,1732513425040 with port=37013, startcode=1732513425956 2024-11-25T05:43:47,343 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T05:43:47,343 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T05:43:47,343 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,46037,1732513425040 with port=40635, startcode=1732513426089 2024-11-25T05:43:47,344 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,46037,1732513425040 with port=36001, startcode=1732513426136 2024-11-25T05:43:47,358 DEBUG [RS:1;8ef925b832e3:40635 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T05:43:47,358 DEBUG [RS:0;8ef925b832e3:37013 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T05:43:47,358 DEBUG [RS:2;8ef925b832e3:36001 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T05:43:47,358 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:47,359 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T05:43:47,367 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:47,368 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T05:43:47,378 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732513457378 2024-11-25T05:43:47,380 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T05:43:47,381 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T05:43:47,387 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T05:43:47,388 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T05:43:47,388 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T05:43:47,388 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T05:43:47,396 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:47,397 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:47,397 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T05:43:47,417 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T05:43:47,423 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43641, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T05:43:47,424 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46371, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T05:43:47,424 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T05:43:47,425 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42399, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T05:43:47,426 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T05:43:47,428 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:44954 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44954 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:47,431 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T05:43:47,432 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T05:43:47,433 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-25T05:43:47,437 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.large.0-1732513427433,5,FailOnTimeoutGroup] 2024-11-25T05:43:47,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-25T05:43:47,440 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-25T05:43:47,441 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-25T05:43:47,445 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.small.0-1732513427438,5,FailOnTimeoutGroup] 2024-11-25T05:43:47,445 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,446 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T05:43:47,447 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,447 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,469 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-25T05:43:47,469 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-25T05:43:47,469 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-25T05:43:47,469 WARN [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-25T05:43:47,469 WARN [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-25T05:43:47,469 WARN [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
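The HMaster entry above notes that reopening regions with very high storeFileRefCount is disabled because hbase.regions.recovery.store.file.ref.count is not set to a positive value. A minimal sketch of enabling it with an illustrative threshold (3 is an arbitrary example value, not a recommendation), using the standard Hadoop Configuration API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per the log message above, any threshold > 0 enables the feature.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }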
2024-11-25T05:43:47,571 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,46037,1732513425040 with port=37013, startcode=1732513425956 2024-11-25T05:43:47,571 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,46037,1732513425040 with port=36001, startcode=1732513426136 2024-11-25T05:43:47,571 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,46037,1732513425040 with port=40635, startcode=1732513426089 2024-11-25T05:43:47,573 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8ef925b832e3,40635,1732513426089 2024-11-25T05:43:47,575 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] master.ServerManager(517): Registering regionserver=8ef925b832e3,40635,1732513426089 2024-11-25T05:43:47,582 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8ef925b832e3,37013,1732513425956 2024-11-25T05:43:47,582 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] master.ServerManager(517): Registering regionserver=8ef925b832e3,37013,1732513425956 2024-11-25T05:43:47,582 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10 2024-11-25T05:43:47,582 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44447 2024-11-25T05:43:47,582 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T05:43:47,586 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8ef925b832e3,36001,1732513426136 2024-11-25T05:43:47,586 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10 2024-11-25T05:43:47,586 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46037 {}] master.ServerManager(517): Registering regionserver=8ef925b832e3,36001,1732513426136 2024-11-25T05:43:47,586 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44447 2024-11-25T05:43:47,586 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T05:43:47,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:47,587 DEBUG [RS:1;8ef925b832e3:40635 {}] zookeeper.ZKUtil(111): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8ef925b832e3,40635,1732513426089 2024-11-25T05:43:47,587 WARN [RS:1;8ef925b832e3:40635 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T05:43:47,588 INFO [RS:1;8ef925b832e3:40635 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T05:43:47,588 DEBUG [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,40635,1732513426089 2024-11-25T05:43:47,590 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10 2024-11-25T05:43:47,590 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44447 2024-11-25T05:43:47,591 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T05:43:47,591 DEBUG [RS:0;8ef925b832e3:37013 {}] zookeeper.ZKUtil(111): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8ef925b832e3,37013,1732513425956 2024-11-25T05:43:47,591 WARN [RS:0;8ef925b832e3:37013 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T05:43:47,591 INFO [RS:0;8ef925b832e3:37013 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T05:43:47,591 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956 2024-11-25T05:43:47,592 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8ef925b832e3,37013,1732513425956] 2024-11-25T05:43:47,592 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8ef925b832e3,40635,1732513426089] 2024-11-25T05:43:47,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:47,593 DEBUG [RS:2;8ef925b832e3:36001 {}] zookeeper.ZKUtil(111): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8ef925b832e3,36001,1732513426136 2024-11-25T05:43:47,593 WARN [RS:2;8ef925b832e3:36001 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
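The region servers above each instantiate a WALProvider of type AsyncFSWALProvider. A minimal sketch of selecting that provider explicitly, assuming the hbase.wal.provider key with the "asyncfs" value (an assumption about the configuration mapping, not something stated in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" is assumed to map to AsyncFSWALProvider as seen in the log above.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }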
2024-11-25T05:43:47,593 INFO [RS:2;8ef925b832e3:36001 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T05:43:47,593 DEBUG [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,36001,1732513426136 2024-11-25T05:43:47,593 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8ef925b832e3,36001,1732513426136] 2024-11-25T05:43:47,616 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T05:43:47,616 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T05:43:47,616 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T05:43:47,631 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T05:43:47,631 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T05:43:47,632 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T05:43:47,636 INFO [RS:1;8ef925b832e3:40635 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T05:43:47,636 INFO [RS:2;8ef925b832e3:36001 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T05:43:47,636 INFO [RS:0;8ef925b832e3:37013 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T05:43:47,636 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,636 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,636 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
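The PressureAwareCompactionThroughputController entries above report compaction throttling bounds of 100.00 MB/second (higher) and 50.00 MB/second (lower). A minimal sketch of adjusting those bounds, assuming the hbase.hstore.compaction.throughput.higher.bound and .lower.bound keys with values in bytes per second (key names are assumptions; only the magnitudes come from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 100 MB/s and 50 MB/s expressed in bytes/second; key names are assumed.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }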
2024-11-25T05:43:47,637 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T05:43:47,637 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T05:43:47,637 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T05:43:47,643 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T05:43:47,643 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T05:43:47,643 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T05:43:47,644 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,644 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,644 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,644 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,644 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG 
[RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:47,645 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:47,645 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,645 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:47,646 DEBUG [RS:1;8ef925b832e3:40635 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:47,646 DEBUG [RS:0;8ef925b832e3:37013 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:47,646 DEBUG [RS:2;8ef925b832e3:36001 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:47,647 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,647 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,647 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,647 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
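
Each of the RS_* services above is a small fixed-size thread pool: corePoolSize equals maxPoolSize for every category. As a rough, minimal JDK sketch of that shape (this is not HBase's own executor.ExecutorService wrapper; the queue capacity of 100 is an arbitrary illustrative value):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
        public static void main(String[] args) {
            // Mirrors entries like "RS_SNAPSHOT_OPERATIONS ... corePoolSize=3, maxPoolSize=3":
            // core == max, so the pool never grows beyond its configured size.
            ThreadPoolExecutor snapshotOps = new ThreadPoolExecutor(
                    3, 3, 60L, TimeUnit.SECONDS, new ArrayBlockingQueue<>(100));
            snapshotOps.submit(() -> System.out.println("snapshot task running"));
            snapshotOps.shutdown();
        }
    }

Keeping core and max equal gives each operation type a predictable, bounded amount of concurrency, which appears to be the point of splitting the region server's work into these per-category pools.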
2024-11-25T05:43:47,647 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,40635,1732513426089-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,648 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,36001,1732513426136-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:47,649 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,649 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,649 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,649 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,649 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,649 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37013,1732513425956-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:47,665 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T05:43:47,667 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,36001,1732513426136-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,667 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
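
The ScheduledChore entries above are periodic background tasks with a fixed period (e.g. CompactionChecker at period=1000, unit=MILLISECONDS). A minimal JDK analogue of such a chore, using ScheduledExecutorService rather than HBase's ChoreService; all names here are illustrative only:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
            // period=1000, unit=MILLISECONDS, like the CompactionChecker chore above
            chorePool.scheduleAtFixedRate(
                    () -> System.out.println("compaction check tick"),
                    0, 1000, TimeUnit.MILLISECONDS);
            Thread.sleep(3_000);   // let a few ticks fire
            chorePool.shutdownNow();
        }
    }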
2024-11-25T05:43:47,668 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.Replication(171): 8ef925b832e3,36001,1732513426136 started 2024-11-25T05:43:47,670 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T05:43:47,670 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T05:43:47,670 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37013,1732513425956-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,670 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,40635,1732513426089-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,670 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,670 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,670 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.Replication(171): 8ef925b832e3,37013,1732513425956 started 2024-11-25T05:43:47,670 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.Replication(171): 8ef925b832e3,40635,1732513426089 started 2024-11-25T05:43:47,684 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,684 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(1482): Serving as 8ef925b832e3,36001,1732513426136, RpcServer on 8ef925b832e3/172.17.0.2:36001, sessionid=0x10075683fbb0003 2024-11-25T05:43:47,685 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T05:43:47,685 DEBUG [RS:2;8ef925b832e3:36001 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8ef925b832e3,36001,1732513426136 2024-11-25T05:43:47,685 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,36001,1732513426136' 2024-11-25T05:43:47,685 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T05:43:47,686 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T05:43:47,687 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T05:43:47,687 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T05:43:47,687 DEBUG [RS:2;8ef925b832e3:36001 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8ef925b832e3,36001,1732513426136 2024-11-25T05:43:47,687 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,36001,1732513426136' 2024-11-25T05:43:47,687 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T05:43:47,688 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-11-25T05:43:47,689 DEBUG [RS:2;8ef925b832e3:36001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T05:43:47,689 INFO [RS:2;8ef925b832e3:36001 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T05:43:47,689 INFO [RS:2;8ef925b832e3:36001 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T05:43:47,692 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,692 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:47,692 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(1482): Serving as 8ef925b832e3,40635,1732513426089, RpcServer on 8ef925b832e3/172.17.0.2:40635, sessionid=0x10075683fbb0002 2024-11-25T05:43:47,692 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1482): Serving as 8ef925b832e3,37013,1732513425956, RpcServer on 8ef925b832e3/172.17.0.2:37013, sessionid=0x10075683fbb0001 2024-11-25T05:43:47,692 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T05:43:47,692 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T05:43:47,692 DEBUG [RS:0;8ef925b832e3:37013 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8ef925b832e3,37013,1732513425956 2024-11-25T05:43:47,692 DEBUG [RS:1;8ef925b832e3:40635 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8ef925b832e3,40635,1732513426089 2024-11-25T05:43:47,692 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,40635,1732513426089' 2024-11-25T05:43:47,692 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,37013,1732513425956' 2024-11-25T05:43:47,692 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T05:43:47,692 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T05:43:47,693 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T05:43:47,693 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T05:43:47,694 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T05:43:47,694 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T05:43:47,694 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T05:43:47,694 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T05:43:47,694 DEBUG [RS:1;8ef925b832e3:40635 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8ef925b832e3,40635,1732513426089 2024-11-25T05:43:47,694 DEBUG [RS:0;8ef925b832e3:37013 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8ef925b832e3,37013,1732513425956 2024-11-25T05:43:47,694 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,40635,1732513426089' 2024-11-25T05:43:47,694 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,37013,1732513425956' 2024-11-25T05:43:47,694 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T05:43:47,694 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T05:43:47,695 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T05:43:47,695 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T05:43:47,695 DEBUG [RS:0;8ef925b832e3:37013 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T05:43:47,695 INFO [RS:0;8ef925b832e3:37013 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T05:43:47,695 DEBUG [RS:1;8ef925b832e3:40635 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T05:43:47,695 INFO [RS:0;8ef925b832e3:37013 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T05:43:47,696 INFO [RS:1;8ef925b832e3:40635 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T05:43:47,696 INFO [RS:1;8ef925b832e3:40635 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
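
The procedure members above coordinate through ZooKeeper znodes such as /hbase/flush-table-proc/abort and /hbase/flush-table-proc/acquired. A small sketch of inspecting those paths with the plain ZooKeeper client, not HBase's ZKProcedureMemberRpcs; the quorum 127.0.0.1:58462 is this mini-cluster's ephemeral test port, so the snippet only works while that test cluster is running:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ProcedureZNodePeek {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:58462", 30_000, event -> { });
            // Check that the abort path exists; aborted procedures would appear beneath it.
            Stat abort = zk.exists("/hbase/flush-table-proc/abort", false);
            System.out.println("abort znode present: " + (abort != null));
            // Newly started procedures show up as children of the acquired path.
            List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);
            System.out.println("acquired procedures: " + acquired);
            zk.close();
        }
    }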
2024-11-25T05:43:47,800 INFO [RS:2;8ef925b832e3:36001 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-25T05:43:47,800 INFO [RS:1;8ef925b832e3:40635 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-25T05:43:47,800 INFO [RS:0;8ef925b832e3:37013 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-25T05:43:47,805 INFO [RS:1;8ef925b832e3:40635 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C40635%2C1732513426089, suffix=, logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,40635,1732513426089, archiveDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs, maxLogs=32 2024-11-25T05:43:47,805 INFO [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C37013%2C1732513425956, suffix=, logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956, archiveDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs, maxLogs=32 2024-11-25T05:43:47,805 INFO [RS:2;8ef925b832e3:36001 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C36001%2C1732513426136, suffix=, logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,36001,1732513426136, archiveDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs, maxLogs=32 2024-11-25T05:43:47,825 DEBUG [RS:1;8ef925b832e3:40635 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,40635,1732513426089/8ef925b832e3%2C40635%2C1732513426089.1732513427812, exclude list is [], retry=0 2024-11-25T05:43:47,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45395,DS-17deab0b-8df1-443f-8ae2-936bbecd6e47,DISK] 2024-11-25T05:43:47,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45471,DS-d2421e3a-d939-49c2-b743-b798a69d4824,DISK] 2024-11-25T05:43:47,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46773,DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9,DISK] 2024-11-25T05:43:47,832 DEBUG [RS:0;8ef925b832e3:37013 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956/8ef925b832e3%2C37013%2C1732513425956.1732513427812, exclude list is [], retry=0 2024-11-25T05:43:47,832 DEBUG [RS:2;8ef925b832e3:36001 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,36001,1732513426136/8ef925b832e3%2C36001%2C1732513426136.1732513427812, exclude list is [], retry=0 2024-11-25T05:43:47,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45471,DS-d2421e3a-d939-49c2-b743-b798a69d4824,DISK] 2024-11-25T05:43:47,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45395,DS-17deab0b-8df1-443f-8ae2-936bbecd6e47,DISK] 2024-11-25T05:43:47,840 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:47,842 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T05:43:47,842 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10 2024-11-25T05:43:47,848 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:47,848 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-25T05:43:47,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46773,DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9,DISK]
2024-11-25T05:43:47,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45395,DS-17deab0b-8df1-443f-8ae2-936bbecd6e47,DISK]
2024-11-25T05:43:47,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46773,DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9,DISK]
2024-11-25T05:43:47,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45471,DS-d2421e3a-d939-49c2-b743-b798a69d4824,DISK]
2024-11-25T05:43:47,860 INFO [RS:1;8ef925b832e3:40635 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,40635,1732513426089/8ef925b832e3%2C40635%2C1732513426089.1732513427812
2024-11-25T05:43:47,867 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:44978 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775696_1017] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44978 dst: /127.0.0.1:45471
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
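
The "Premature EOF from inputStream" above comes from a read-fully style loop on the datanode: the receiver expects a complete packet, but the writer has already torn the connection down. A simplified, self-contained analogue of that contract (not Hadoop's org.apache.hadoop.io.IOUtils implementation):

    import java.io.ByteArrayInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    public class ReadFullySketch {
        // Keep reading until the buffer is full, or fail if the stream ends first.
        static void readFully(InputStream in, byte[] buf) throws IOException {
            int off = 0;
            while (off < buf.length) {
                int n = in.read(buf, off, buf.length - off);
                if (n < 0) {
                    throw new EOFException("Premature EOF: wanted " + buf.length + " bytes, got " + off);
                }
                off += n;
            }
        }

        public static void main(String[] args) throws IOException {
            InputStream in = new ByteArrayInputStream(new byte[8]); // only 8 bytes available
            readFully(in, new byte[16]);                            // asks for 16 -> EOFException
        }
    }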
2024-11-25T05:43:47,868 INFO [RS:2;8ef925b832e3:36001 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,36001,1732513426136/8ef925b832e3%2C36001%2C1732513426136.1732513427812 2024-11-25T05:43:47,871 DEBUG [RS:1;8ef925b832e3:40635 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36019:36019),(127.0.0.1/127.0.0.1:35163:35163),(127.0.0.1/127.0.0.1:37635:37635)] 2024-11-25T05:43:47,873 INFO [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956/8ef925b832e3%2C37013%2C1732513425956.1732513427812 2024-11-25T05:43:47,874 DEBUG [RS:2;8ef925b832e3:36001 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35163:35163),(127.0.0.1/127.0.0.1:37635:37635),(127.0.0.1/127.0.0.1:36019:36019)] 2024-11-25T05:43:47,881 DEBUG [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37635:37635),(127.0.0.1/127.0.0.1:35163:35163),(127.0.0.1/127.0.0.1:36019:36019)] 2024-11-25T05:43:47,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775696_1018 (size=32) 2024-11-25T05:43:48,286 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:48,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:48,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T05:43:48,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T05:43:48,294 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,296 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T05:43:48,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T05:43:48,301 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T05:43:48,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T05:43:48,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T05:43:48,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T05:43:48,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,313 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T05:43:48,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740 2024-11-25T05:43:48,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740 2024-11-25T05:43:48,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T05:43:48,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T05:43:48,321 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
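
The "(32.0 M)" fallback above is simply the region's memstore flush size divided by the number of column families in hbase:meta (info, ns, rep_barrier, table). Assuming the default 128 MB flush size is in effect here, the arithmetic matches the flushSizeLowerBound=33554432 reported when the region finishes opening:

    public class FlushLowerBoundMath {
        public static void main(String[] args) {
            long memstoreFlushSize = 128L * 1024 * 1024; // assumed default hbase.hregion.memstore.flush.size (128 MB)
            int columnFamilies = 4;                      // hbase:meta: info, ns, rep_barrier, table
            System.out.println(memstoreFlushSize / columnFamilies); // 33554432 bytes == 32 MB
        }
    }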
2024-11-25T05:43:48,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T05:43:48,333 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T05:43:48,334 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65105529, jitterRate=-0.029852017760276794}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T05:43:48,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732513428288Initializing all the Stores at 1732513428290 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513428290Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513428291 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513428291Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513428291Cleaning up temporary data from old regions at 1732513428320 (+29 ms)Region opened successfully at 1732513428336 (+16 ms) 2024-11-25T05:43:48,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T05:43:48,336 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T05:43:48,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T05:43:48,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T05:43:48,337 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T05:43:48,338 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T05:43:48,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732513428336Disabling compacts and flushes for region at 1732513428336Disabling writes for close at 
1732513428336Writing region close event to WAL at 1732513428338 (+2 ms)Closed at 1732513428338 2024-11-25T05:43:48,341 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:48,342 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T05:43:48,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T05:43:48,360 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T05:43:48,363 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T05:43:48,517 DEBUG [8ef925b832e3:46037 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-25T05:43:48,527 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(204): Hosts are {8ef925b832e3=0} racks are {/default-rack=0} 2024-11-25T05:43:48,536 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-25T05:43:48,537 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-25T05:43:48,537 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-25T05:43:48,537 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-25T05:43:48,537 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-25T05:43:48,537 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-25T05:43:48,537 INFO [8ef925b832e3:46037 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-25T05:43:48,537 INFO [8ef925b832e3:46037 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-25T05:43:48,537 INFO [8ef925b832e3:46037 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-25T05:43:48,537 DEBUG [8ef925b832e3:46037 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-25T05:43:48,546 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8ef925b832e3,37013,1732513425956 2024-11-25T05:43:48,555 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8ef925b832e3,37013,1732513425956, state=OPENING 2024-11-25T05:43:48,561 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T05:43:48,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:48,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:48,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:48,562 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:48,563 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,563 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,564 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,564 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,566 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T05:43:48,568 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8ef925b832e3,37013,1732513425956}] 2024-11-25T05:43:48,748 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T05:43:48,752 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T05:43:48,765 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T05:43:48,766 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T05:43:48,767 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-25T05:43:48,797 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C37013%2C1732513425956.meta, suffix=.meta, logDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956, archiveDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs, maxLogs=32 2024-11-25T05:43:48,817 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956/8ef925b832e3%2C37013%2C1732513425956.meta.1732513428798.meta, exclude list is [], retry=0 2024-11-25T05:43:48,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45395,DS-17deab0b-8df1-443f-8ae2-936bbecd6e47,DISK] 2024-11-25T05:43:48,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46773,DS-96aa0a49-7eeb-46fa-ac00-5cc71cc165f9,DISK] 2024-11-25T05:43:48,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45471,DS-d2421e3a-d939-49c2-b743-b798a69d4824,DISK] 2024-11-25T05:43:48,831 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,37013,1732513425956/8ef925b832e3%2C37013%2C1732513425956.meta.1732513428798.meta 2024-11-25T05:43:48,832 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36019:36019),(127.0.0.1/127.0.0.1:37635:37635),(127.0.0.1/127.0.0.1:35163:35163)] 2024-11-25T05:43:48,832 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T05:43:48,834 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T05:43:48,837 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T05:43:48,843 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
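
The coprocessor loaded above, MultiRowMutationEndpoint, is registered with priority 536870911. That value is Integer.MAX_VALUE / 4, which matches the system-priority tier HBase uses for coprocessors attached to system tables; a quick arithmetic check:

    public class CoprocessorPriorityCheck {
        public static void main(String[] args) {
            // 2147483647 / 4 == 536870911 with integer division
            System.out.println(Integer.MAX_VALUE / 4);
            System.out.println(Integer.MAX_VALUE / 4 == 536870911); // true
        }
    }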
2024-11-25T05:43:48,847 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T05:43:48,848 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:48,848 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T05:43:48,848 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T05:43:48,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T05:43:48,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T05:43:48,854 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T05:43:48,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T05:43:48,857 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T05:43:48,861 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T05:43:48,861 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:48,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T05:43:48,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T05:43:48,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:48,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
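
The compaction settings printed repeatedly above (ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10) feed a size-ratio file selection. The policy actually named in the log is ExploringCompactionPolicy; the sketch below only illustrates the simpler ratio rule it builds on (skip leading files that are larger than ratio times the combined size of the files after them), with made-up file sizes:

    import java.util.ArrayList;
    import java.util.List;

    public class RatioSelectionSketch {
        static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
            int start = 0;
            while (start < sizes.size()) {
                long rest = 0;
                for (int i = start + 1; i < sizes.size(); i++) rest += sizes.get(i);
                if (sizes.get(start) <= ratio * rest) break; // small enough to compact with the rest
                start++;                                     // too large: leave it alone
            }
            List<Long> picked = new ArrayList<>(
                    sizes.subList(start, Math.min(sizes.size(), start + maxFiles)));
            return picked.size() >= minFiles ? picked : List.of();
        }

        public static void main(String[] args) {
            // ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10, as in the config above
            System.out.println(select(List.of(100L, 12L, 12L, 10L), 1.2, 3, 10)); // [12, 12, 10]
        }
    }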
2024-11-25T05:43:48,865 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T05:43:48,867 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740 2024-11-25T05:43:48,870 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740 2024-11-25T05:43:48,872 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T05:43:48,872 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T05:43:48,873 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T05:43:48,876 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T05:43:48,879 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65927332, jitterRate=-0.01760619878768921}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T05:43:48,879 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T05:43:48,880 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732513428849Writing region info on filesystem at 1732513428849Initializing all the Stores at 1732513428851 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513428851Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513428852 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513428852Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513428852Cleaning up temporary data from old regions at 1732513428873 (+21 ms)Running coprocessor post-open hooks at 1732513428879 (+6 ms)Region opened successfully at 1732513428880 (+1 ms) 2024-11-25T05:43:48,888 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732513428738 2024-11-25T05:43:48,901 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T05:43:48,902 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T05:43:48,904 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8ef925b832e3,37013,1732513425956 2024-11-25T05:43:48,907 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8ef925b832e3,37013,1732513425956, state=OPEN 2024-11-25T05:43:48,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:48,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:48,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:48,909 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:48,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:48,909 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=8ef925b832e3,37013,1732513425956 2024-11-25T05:43:48,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T05:43:48,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8ef925b832e3,37013,1732513425956 in 342 msec 2024-11-25T05:43:48,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T05:43:48,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 568 msec 2024-11-25T05:43:48,926 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:48,926 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T05:43:48,951 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T05:43:48,953 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8ef925b832e3,37013,1732513425956, seqNum=-1] 2024-11-25T05:43:48,974 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T05:43:48,977 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45797, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T05:43:49,000 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.7600 sec 2024-11-25T05:43:49,000 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732513429000, completionTime=-1 2024-11-25T05:43:49,003 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-25T05:43:49,003 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-25T05:43:49,045 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-25T05:43:49,045 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732513489045 2024-11-25T05:43:49,045 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732513549045 2024-11-25T05:43:49,045 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 42 msec 2024-11-25T05:43:49,048 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-25T05:43:49,057 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,46037,1732513425040-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,057 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,46037,1732513425040-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,057 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,46037,1732513425040-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,060 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8ef925b832e3:46037, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,065 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,066 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,068 DEBUG [master/8ef925b832e3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T05:43:49,099 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.856sec 2024-11-25T05:43:49,101 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T05:43:49,103 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T05:43:49,104 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T05:43:49,105 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
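Each "Chore ScheduledChore name=..., period=..., unit=... is enabled" line above is the newly active master registering a periodic background task with its ChoreService. ChoreService and ScheduledChore are HBase-internal classes rather than public client API, so the following is only a sketch of the scheduling pattern those lines describe, under the assumption that the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore(...) work as in current HBase sources; the chore body and names here are invented.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      public static void main(String[] args) throws Exception {
        // A trivial Stoppable so the chore knows when its owner is shutting down.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        // period is in milliseconds, like the period=60000 ClusterStatusChore entry above.
        ScheduledChore chore = new ScheduledChore("SketchChore", stopper, 60000) {
          @Override
          protected void chore() {
            // periodic work would go here
          }
        };
        service.scheduleChore(chore);
        Thread.sleep(100);
        service.shutdown();
      }
    }

The master-side chores in this log (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) are all instances of this same pattern, differing only in name and period.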
2024-11-25T05:43:49,105 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T05:43:49,106 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,46037,1732513425040-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:49,107 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,46037,1732513425040-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T05:43:49,112 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T05:43:49,113 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T05:43:49,113 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,46037,1732513425040-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:49,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f2048d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T05:43:49,210 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-25T05:43:49,210 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-25T05:43:49,213 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8ef925b832e3,46037,-1 for getting cluster id 2024-11-25T05:43:49,216 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T05:43:49,225 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6dabbbad-1243-4207-95ed-74e7a3b6de28' 2024-11-25T05:43:49,227 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T05:43:49,227 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6dabbbad-1243-4207-95ed-74e7a3b6de28" 2024-11-25T05:43:49,230 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5856fed3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T05:43:49,230 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8ef925b832e3,46037,-1] 2024-11-25T05:43:49,232 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T05:43:49,234 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:49,235 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-11-25T05:43:49,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7feee08b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T05:43:49,239 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T05:43:49,248 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8ef925b832e3,37013,1732513425956, seqNum=-1] 2024-11-25T05:43:49,249 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T05:43:49,252 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T05:43:49,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8ef925b832e3,46037,1732513425040 2024-11-25T05:43:49,280 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T05:43:49,286 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 8ef925b832e3,46037,1732513425040 2024-11-25T05:43:49,289 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c0ad99b 2024-11-25T05:43:49,291 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T05:43:49,294 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43724, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T05:43:49,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T05:43:49,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-25T05:43:49,314 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T05:43:49,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-25T05:43:49,317 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:49,320 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T05:43:49,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:49,332 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:49,332 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:49,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:48772 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48772 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:49,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-25T05:43:49,352 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
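The WARN/ERROR burst above comes from writing the new table's filesystem layout onto an erasure-coded directory: the RS-3-2-1024k policy stripes each block group across 3 data and 2 parity blocks, so it needs at least 5 datanodes, while this minicluster runs only 3. HDFS therefore cannot place parity indices 3 and 4 and warns that the block group is short 2 blocks; the write itself still completes (the addStoredBlock line follows), just without parity protection. The log's own suggestion, 'hdfs ec -verifyClusterSetup', reports exactly this mismatch from the shell. Below is a minimal client-side equivalent, assuming a Hadoop 3.x client on the classpath and fs.defaultFS pointing at the cluster under test; the directory path is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public final class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/user/jenkins/test-data");   // illustrative directory
        try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + " uses plain replication, no EC policy to satisfy");
            return;
          }
          // RS-3-2-1024k => 3 data units + 2 parity units => at least 5 datanodes required.
          int required = policy.getNumDataUnits() + policy.getNumParityUnits();
          int live = dfs.getDataNodeStats().length;
          System.out.printf("%s: policy=%s, datanodes required=%d, live=%d%n",
              dir, policy.getName(), required, live);
        }
      }
    }

The same 3-datanodes-vs-5-blocks mismatch explains the identical "Cannot allocate parity block" warnings that reappear later in this log when the first HFile is flushed.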
2024-11-25T05:43:49,355 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 007ad91201e659f6c1b4f8fa73808a46, NAME => 'TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10 2024-11-25T05:43:49,362 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:49,362 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:49,375 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:54214 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:46773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54214 dst: /127.0.0.1:46773 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:49,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-25T05:43:49,382 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-25T05:43:49,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:49,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 007ad91201e659f6c1b4f8fa73808a46, disabling compactions & flushes 2024-11-25T05:43:49,383 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:49,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:49,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. after waiting 0 ms 2024-11-25T05:43:49,384 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:49,384 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:49,384 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 007ad91201e659f6c1b4f8fa73808a46: Waiting for close lock at 1732513429383Disabling compacts and flushes for region at 1732513429383Disabling writes for close at 1732513429383Writing region close event to WAL at 1732513429384 (+1 ms)Closed at 1732513429384 2024-11-25T05:43:49,386 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T05:43:49,392 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732513429386"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732513429386"}]},"ts":"1732513429386"} 2024-11-25T05:43:49,398 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
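The CreateTableProcedure being driven through these states was triggered by the client request logged earlier ("Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', ... {NAME => 'cf', ...}"). For orientation, here is a minimal sketch of what such a request looks like through the public Admin API; it mirrors the descriptor printed by the master (one 'cf' family, REGION_REPLICATION => '1', defaults elsewhere) but is an illustration, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One column family 'cf', one region replica, defaults everywhere else,
          // matching the descriptor the master logged for TestHBaseWalOnEC.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          admin.createTable(desc);   // returns once the CreateTableProcedure finishes
        }
      }
    }

The createTable call waits on the master-side procedure, which is what produces the repeated "Checking to see if procedure is done pid=4" lines around this span.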
2024-11-25T05:43:49,400 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T05:43:49,403 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732513429400"}]},"ts":"1732513429400"} 2024-11-25T05:43:49,408 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-25T05:43:49,409 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {8ef925b832e3=0} racks are {/default-rack=0} 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-25T05:43:49,411 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-25T05:43:49,411 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-25T05:43:49,411 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-25T05:43:49,411 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-25T05:43:49,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=007ad91201e659f6c1b4f8fa73808a46, ASSIGN}] 2024-11-25T05:43:49,416 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=007ad91201e659f6c1b4f8fa73808a46, ASSIGN 2024-11-25T05:43:49,418 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=007ad91201e659f6c1b4f8fa73808a46, ASSIGN; state=OFFLINE, location=8ef925b832e3,37013,1732513425956; forceNewPlan=false, retain=false 2024-11-25T05:43:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:49,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-25T05:43:49,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-25T05:43:49,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-25T05:43:49,482 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-25T05:43:49,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-25T05:43:49,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-25T05:43:49,572 INFO [8ef925b832e3:46037 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-25T05:43:49,573 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=007ad91201e659f6c1b4f8fa73808a46, regionState=OPENING, regionLocation=8ef925b832e3,37013,1732513425956 2024-11-25T05:43:49,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=007ad91201e659f6c1b4f8fa73808a46, ASSIGN because future has completed 2024-11-25T05:43:49,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 007ad91201e659f6c1b4f8fa73808a46, server=8ef925b832e3,37013,1732513425956}] 2024-11-25T05:43:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-25T05:43:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-25T05:43:49,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775692_1018 (size=32) 2024-11-25T05:43:49,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:49,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775693_1018 (size=32) 2024-11-25T05:43:49,741 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 
2024-11-25T05:43:49,742 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 007ad91201e659f6c1b4f8fa73808a46, NAME => 'TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46.', STARTKEY => '', ENDKEY => ''} 2024-11-25T05:43:49,742 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,742 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:49,743 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,743 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,746 INFO [StoreOpener-007ad91201e659f6c1b4f8fa73808a46-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,749 INFO [StoreOpener-007ad91201e659f6c1b4f8fa73808a46-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 007ad91201e659f6c1b4f8fa73808a46 columnFamilyName cf 2024-11-25T05:43:49,749 DEBUG [StoreOpener-007ad91201e659f6c1b4f8fa73808a46-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:49,751 INFO [StoreOpener-007ad91201e659f6c1b4f8fa73808a46-1 {}] regionserver.HStore(327): Store=007ad91201e659f6c1b4f8fa73808a46/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:49,751 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,753 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,753 DEBUG 
[RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,754 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,755 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,758 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,768 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T05:43:49,769 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 007ad91201e659f6c1b4f8fa73808a46; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64094363, jitterRate=-0.044919565320014954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T05:43:49,769 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:49,770 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 007ad91201e659f6c1b4f8fa73808a46: Running coprocessor pre-open hook at 1732513429743Writing region info on filesystem at 1732513429743Initializing all the Stores at 1732513429745 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513429746 (+1 ms)Cleaning up temporary data from old regions at 1732513429755 (+9 ms)Running coprocessor post-open hooks at 1732513429769 (+14 ms)Region opened successfully at 1732513429770 (+1 ms) 2024-11-25T05:43:49,772 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46., pid=6, masterSystemTime=1732513429734 2024-11-25T05:43:49,776 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:49,776 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 
2024-11-25T05:43:49,777 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=007ad91201e659f6c1b4f8fa73808a46, regionState=OPEN, openSeqNum=2, regionLocation=8ef925b832e3,37013,1732513425956 2024-11-25T05:43:49,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 007ad91201e659f6c1b4f8fa73808a46, server=8ef925b832e3,37013,1732513425956 because future has completed 2024-11-25T05:43:49,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T05:43:49,790 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 007ad91201e659f6c1b4f8fa73808a46, server=8ef925b832e3,37013,1732513425956 in 205 msec 2024-11-25T05:43:49,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T05:43:49,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=007ad91201e659f6c1b4f8fa73808a46, ASSIGN in 377 msec 2024-11-25T05:43:49,797 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T05:43:49,797 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732513429797"}]},"ts":"1732513429797"} 2024-11-25T05:43:49,801 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-25T05:43:49,804 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T05:43:49,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 500 msec 2024-11-25T05:43:49,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:49,956 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-25T05:43:49,956 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-25T05:43:49,957 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T05:43:49,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-25T05:43:49,965 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T05:43:49,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-25T05:43:49,977 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46., hostname=8ef925b832e3,37013,1732513425956, seqNum=2] 2024-11-25T05:43:49,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-25T05:43:49,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-25T05:43:49,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:49,997 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-25T05:43:49,999 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T05:43:50,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T05:43:50,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:50,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37013 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-25T05:43:50,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:50,172 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 007ad91201e659f6c1b4f8fa73808a46 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-25T05:43:50,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/.tmp/cf/e819eb4e4b6b40d78af61c7476ccbb0a is 36, key is row/cf:cq/1732513429980/Put/seqid=0 2024-11-25T05:43:50,239 WARN [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-25T05:43:50,239 WARN [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:50,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1868430370_22 at /127.0.0.1:54744 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:45395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54744 dst: /127.0.0.1:45395 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:50,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-25T05:43:50,249 WARN [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-25T05:43:50,249 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/.tmp/cf/e819eb4e4b6b40d78af61c7476ccbb0a 2024-11-25T05:43:50,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/.tmp/cf/e819eb4e4b6b40d78af61c7476ccbb0a as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/cf/e819eb4e4b6b40d78af61c7476ccbb0a 2024-11-25T05:43:50,307 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/cf/e819eb4e4b6b40d78af61c7476ccbb0a, entries=1, sequenceid=5, filesize=4.7 K 2024-11-25T05:43:50,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:50,315 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 007ad91201e659f6c1b4f8fa73808a46 in 143ms, sequenceid=5, compaction requested=false 2024-11-25T05:43:50,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-25T05:43:50,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 007ad91201e659f6c1b4f8fa73808a46: 2024-11-25T05:43:50,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 
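The flush traced above is likewise client-driven: a single Put (row 'row', column 'cf:cq', 32 bytes of data per the Flushing and HFileWriterImpl lines) followed by an administrative flush of the table, which the master runs as a FlushTableProcedure with one FlushRegionProcedure child. A rough sketch of the equivalent client calls follows; the value written is a stand-in, since the log records only the data size.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutAndFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // Row 'row', family 'cf', qualifier 'cq': the coordinates visible in the
          // flush log; the value below is an arbitrary stand-in.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Force the memstore out to an HFile; this is what triggers the
          // FlushTableProcedure/FlushRegionProcedure pair seen above.
          admin.flush(name);
        }
      }
    }

The resulting HFile (e819eb4e4b6b40d78af61c7476ccbb0a, ~4.7 K) is written to the same erasure-coded directory, which is why the parity-allocation warnings recur during the flush.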
2024-11-25T05:43:50,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-25T05:43:50,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-25T05:43:50,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T05:43:50,331 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 324 msec 2024-11-25T05:43:50,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 343 msec 2024-11-25T05:43:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46037 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:50,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-25T05:43:50,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T05:43:50,643 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T05:43:50,643 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:50,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,651 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T05:43:50,651 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T05:43:50,651 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=994033306, stopped=false 2024-11-25T05:43:50,652 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8ef925b832e3,46037,1732513425040 2024-11-25T05:43:50,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:50,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:50,653 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:50,653 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:50,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:50,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, 
quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:50,654 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T05:43:50,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:50,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:50,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:50,654 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T05:43:50,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:50,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:50,655 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at 
org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:50,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:50,656 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8ef925b832e3,37013,1732513425956' ***** 2024-11-25T05:43:50,656 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T05:43:50,656 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8ef925b832e3,40635,1732513426089' ***** 2024-11-25T05:43:50,656 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T05:43:50,656 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T05:43:50,656 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T05:43:50,656 INFO [RS:1;8ef925b832e3:40635 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T05:43:50,656 INFO [RS:0;8ef925b832e3:37013 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T05:43:50,656 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T05:43:50,656 INFO [RS:1;8ef925b832e3:40635 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-25T05:43:50,656 INFO [RS:0;8ef925b832e3:37013 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T05:43:50,657 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8ef925b832e3,36001,1732513426136' ***** 2024-11-25T05:43:50,657 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(959): stopping server 8ef925b832e3,40635,1732513426089 2024-11-25T05:43:50,657 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T05:43:50,657 INFO [RS:1;8ef925b832e3:40635 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:50,657 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(3091): Received CLOSE for 007ad91201e659f6c1b4f8fa73808a46 2024-11-25T05:43:50,657 INFO [RS:1;8ef925b832e3:40635 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;8ef925b832e3:40635. 2024-11-25T05:43:50,657 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T05:43:50,657 DEBUG [RS:1;8ef925b832e3:40635 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:50,657 DEBUG [RS:1;8ef925b832e3:40635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,657 INFO [RS:2;8ef925b832e3:36001 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T05:43:50,657 INFO [RS:2;8ef925b832e3:36001 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T05:43:50,657 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(959): stopping server 8ef925b832e3,36001,1732513426136 2024-11-25T05:43:50,657 INFO [RS:2;8ef925b832e3:36001 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:50,657 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T05:43:50,657 INFO [RS:2;8ef925b832e3:36001 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;8ef925b832e3:36001. 
2024-11-25T05:43:50,657 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(976): stopping server 8ef925b832e3,40635,1732513426089; all regions closed. 2024-11-25T05:43:50,657 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T05:43:50,657 DEBUG [RS:2;8ef925b832e3:36001 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:50,657 DEBUG [RS:2;8ef925b832e3:36001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,658 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(976): stopping server 8ef925b832e3,36001,1732513426136; all regions closed. 2024-11-25T05:43:50,661 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(959): stopping server 8ef925b832e3,37013,1732513425956 2024-11-25T05:43:50,661 INFO [RS:0;8ef925b832e3:37013 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:50,661 INFO [RS:0;8ef925b832e3:37013 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8ef925b832e3:37013. 
2024-11-25T05:43:50,661 DEBUG [RS:0;8ef925b832e3:37013 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:50,662 DEBUG [RS:0;8ef925b832e3:37013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,662 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 007ad91201e659f6c1b4f8fa73808a46, disabling compactions & flushes 2024-11-25T05:43:50,662 INFO [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:50,662 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T05:43:50,662 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:50,662 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T05:43:50,662 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T05:43:50,662 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. after waiting 0 ms 2024-11-25T05:43:50,662 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 
2024-11-25T05:43:50,662 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T05:43:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_1073741827_1015 (size=93) 2024-11-25T05:43:50,668 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/WALs/8ef925b832e3,36001,1732513426136/8ef925b832e3%2C36001%2C1732513426136.1732513427812 not finished, retry = 0 2024-11-25T05:43:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_1073741827_1015 (size=93) 2024-11-25T05:43:50,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741827_1015 (size=93) 2024-11-25T05:43:50,670 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T05:43:50,670 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T05:43:50,670 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 007ad91201e659f6c1b4f8fa73808a46=TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46.} 2024-11-25T05:43:50,671 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T05:43:50,671 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T05:43:50,671 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T05:43:50,671 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T05:43:50,671 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1351): Waiting on 007ad91201e659f6c1b4f8fa73808a46, 1588230740 2024-11-25T05:43:50,671 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-25T05:43:50,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741826_1014 (size=93) 2024-11-25T05:43:50,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_1073741826_1014 (size=93) 2024-11-25T05:43:50,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_1073741826_1014 (size=93) 2024-11-25T05:43:50,688 DEBUG [RS:1;8ef925b832e3:40635 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs 2024-11-25T05:43:50,688 INFO [RS:1;8ef925b832e3:40635 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 8ef925b832e3%2C40635%2C1732513426089:(num 1732513427812) 2024-11-25T05:43:50,688 DEBUG [RS:1;8ef925b832e3:40635 {}] ipc.AbstractRpcClient(514): Stopping 
rpc client 2024-11-25T05:43:50,688 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:50,688 INFO [RS:1;8ef925b832e3:40635 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:50,688 INFO [RS:1;8ef925b832e3:40635 {}] hbase.ChoreService(370): Chore service for: regionserver/8ef925b832e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:50,689 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T05:43:50,689 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T05:43:50,689 INFO [regionserver/8ef925b832e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T05:43:50,689 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T05:43:50,689 INFO [RS:1;8ef925b832e3:40635 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:50,689 INFO [RS:1;8ef925b832e3:40635 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40635 2024-11-25T05:43:50,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8ef925b832e3,40635,1732513426089 2024-11-25T05:43:50,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:50,692 INFO [RS:1;8ef925b832e3:40635 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:50,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8ef925b832e3,40635,1732513426089] 2024-11-25T05:43:50,695 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8ef925b832e3,40635,1732513426089 already deleted, retry=false 2024-11-25T05:43:50,695 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8ef925b832e3,40635,1732513426089 expired; onlineServers=2 2024-11-25T05:43:50,699 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/default/TestHBaseWalOnEC/007ad91201e659f6c1b4f8fa73808a46/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-25T05:43:50,704 INFO [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 
2024-11-25T05:43:50,704 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 007ad91201e659f6c1b4f8fa73808a46: Waiting for close lock at 1732513430661Running coprocessor pre-close hooks at 1732513430662 (+1 ms)Disabling compacts and flushes for region at 1732513430662Disabling writes for close at 1732513430662Writing region close event to WAL at 1732513430675 (+13 ms)Running coprocessor post-close hooks at 1732513430702 (+27 ms)Closed at 1732513430703 (+1 ms) 2024-11-25T05:43:50,704 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46. 2024-11-25T05:43:50,721 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/info/ab2a9918735c496ea2b24e0e8494f2c2 is 153, key is TestHBaseWalOnEC,,1732513429296.007ad91201e659f6c1b4f8fa73808a46./info:regioninfo/1732513429777/Put/seqid=0 2024-11-25T05:43:50,726 INFO [regionserver/8ef925b832e3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T05:43:50,726 INFO [regionserver/8ef925b832e3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T05:43:50,727 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:50,728 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:50,736 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1868430370_22 at /127.0.0.1:48836 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48836 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:50,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-25T05:43:50,751 INFO [regionserver/8ef925b832e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:50,751 INFO [regionserver/8ef925b832e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:50,752 INFO [regionserver/8ef925b832e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:50,774 DEBUG [RS:2;8ef925b832e3:36001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs 2024-11-25T05:43:50,774 INFO [RS:2;8ef925b832e3:36001 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 8ef925b832e3%2C36001%2C1732513426136:(num 1732513427812) 2024-11-25T05:43:50,774 DEBUG [RS:2;8ef925b832e3:36001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:50,774 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:50,774 INFO [RS:2;8ef925b832e3:36001 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:50,774 INFO [RS:2;8ef925b832e3:36001 {}] hbase.ChoreService(370): Chore service for: regionserver/8ef925b832e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:50,774 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T05:43:50,774 INFO [regionserver/8ef925b832e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T05:43:50,774 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T05:43:50,775 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T05:43:50,775 INFO [RS:2;8ef925b832e3:36001 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:50,775 INFO [RS:2;8ef925b832e3:36001 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36001 2024-11-25T05:43:50,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8ef925b832e3,36001,1732513426136 2024-11-25T05:43:50,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:50,776 INFO [RS:2;8ef925b832e3:36001 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:50,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8ef925b832e3,36001,1732513426136] 2024-11-25T05:43:50,779 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8ef925b832e3,36001,1732513426136 already deleted, retry=false 2024-11-25T05:43:50,779 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8ef925b832e3,36001,1732513426136 expired; onlineServers=1 2024-11-25T05:43:50,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:50,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40635-0x10075683fbb0002, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:50,795 INFO [RS:1;8ef925b832e3:40635 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:50,795 INFO [RS:1;8ef925b832e3:40635 {}] regionserver.HRegionServer(1031): Exiting; stopping=8ef925b832e3,40635,1732513426089; zookeeper connection closed. 2024-11-25T05:43:50,796 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1db0a956 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1db0a956 2024-11-25T05:43:50,871 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T05:43:50,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:50,878 INFO [RS:2;8ef925b832e3:36001 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:50,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36001-0x10075683fbb0003, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:50,878 INFO [RS:2;8ef925b832e3:36001 {}] regionserver.HRegionServer(1031): Exiting; stopping=8ef925b832e3,36001,1732513426136; zookeeper connection closed. 
2024-11-25T05:43:50,878 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3b79bed9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3b79bed9 2024-11-25T05:43:51,072 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T05:43:51,145 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:51,145 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/info/ab2a9918735c496ea2b24e0e8494f2c2 2024-11-25T05:43:51,182 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/ns/90177d161b6b46699ea0679a38b2c1ad is 43, key is default/ns:d/1732513428981/Put/seqid=0 2024-11-25T05:43:51,184 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,184 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1868430370_22 at /127.0.0.1:54774 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:45395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54774 dst: /127.0.0.1:45395 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:51,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-25T05:43:51,198 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:51,198 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/ns/90177d161b6b46699ea0679a38b2c1ad 2024-11-25T05:43:51,229 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/table/e82a5d50b22340cfa495cd2986398810 is 52, key is TestHBaseWalOnEC/table:state/1732513429797/Put/seqid=0 2024-11-25T05:43:51,231 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,231 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1868430370_22 at /127.0.0.1:54264 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:46773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54264 dst: /127.0.0.1:46773 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:51,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-25T05:43:51,240 WARN [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:51,240 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/table/e82a5d50b22340cfa495cd2986398810 2024-11-25T05:43:51,253 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/info/ab2a9918735c496ea2b24e0e8494f2c2 as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/info/ab2a9918735c496ea2b24e0e8494f2c2 2024-11-25T05:43:51,265 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/info/ab2a9918735c496ea2b24e0e8494f2c2, entries=10, sequenceid=11, filesize=6.5 K 2024-11-25T05:43:51,267 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/ns/90177d161b6b46699ea0679a38b2c1ad as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/ns/90177d161b6b46699ea0679a38b2c1ad 2024-11-25T05:43:51,272 DEBUG [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T05:43:51,278 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/ns/90177d161b6b46699ea0679a38b2c1ad, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T05:43:51,279 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/.tmp/table/e82a5d50b22340cfa495cd2986398810 as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/table/e82a5d50b22340cfa495cd2986398810 2024-11-25T05:43:51,291 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/table/e82a5d50b22340cfa495cd2986398810, entries=2, sequenceid=11, filesize=5.1 K 
2024-11-25T05:43:51,292 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 621ms, sequenceid=11, compaction requested=false 2024-11-25T05:43:51,293 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T05:43:51,301 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T05:43:51,302 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T05:43:51,303 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T05:43:51,303 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732513430670Running coprocessor pre-close hooks at 1732513430670Disabling compacts and flushes for region at 1732513430670Disabling writes for close at 1732513430671 (+1 ms)Obtaining lock to block concurrent updates at 1732513430671Preparing flush snapshotting stores in 1588230740 at 1732513430671Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732513430673 (+2 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732513430675 (+2 ms)Flushing 1588230740/info: creating writer at 1732513430675Flushing 1588230740/info: appending metadata at 1732513430714 (+39 ms)Flushing 1588230740/info: closing flushed file at 1732513430715 (+1 ms)Flushing 1588230740/ns: creating writer at 1732513431162 (+447 ms)Flushing 1588230740/ns: appending metadata at 1732513431181 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732513431181Flushing 1588230740/table: creating writer at 1732513431210 (+29 ms)Flushing 1588230740/table: appending metadata at 1732513431228 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732513431228Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78843d5b: reopening flushed file at 1732513431251 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41c67b08: reopening flushed file at 1732513431265 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1aa6b692: reopening flushed file at 1732513431278 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 621ms, sequenceid=11, compaction requested=false at 1732513431292 (+14 ms)Writing region close event to WAL at 1732513431294 (+2 ms)Running coprocessor post-close hooks at 1732513431302 (+8 ms)Closed at 1732513431303 (+1 ms) 2024-11-25T05:43:51,303 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T05:43:51,472 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(976): stopping server 8ef925b832e3,37013,1732513425956; all 
regions closed. 2024-11-25T05:43:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_1073741829_1019 (size=2751) 2024-11-25T05:43:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741829_1019 (size=2751) 2024-11-25T05:43:51,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_1073741829_1019 (size=2751) 2024-11-25T05:43:51,482 DEBUG [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs 2024-11-25T05:43:51,482 INFO [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 8ef925b832e3%2C37013%2C1732513425956.meta:.meta(num 1732513428798) 2024-11-25T05:43:51,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_1073741828_1016 (size=1298) 2024-11-25T05:43:51,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_1073741828_1016 (size=1298) 2024-11-25T05:43:51,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741828_1016 (size=1298) 2024-11-25T05:43:51,489 DEBUG [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/oldWALs 2024-11-25T05:43:51,489 INFO [RS:0;8ef925b832e3:37013 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 8ef925b832e3%2C37013%2C1732513425956:(num 1732513427812) 2024-11-25T05:43:51,489 DEBUG [RS:0;8ef925b832e3:37013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:51,489 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:51,489 INFO [RS:0;8ef925b832e3:37013 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:51,490 INFO [RS:0;8ef925b832e3:37013 {}] hbase.ChoreService(370): Chore service for: regionserver/8ef925b832e3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:51,490 INFO [RS:0;8ef925b832e3:37013 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:51,490 INFO [regionserver/8ef925b832e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T05:43:51,490 INFO [RS:0;8ef925b832e3:37013 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37013 2024-11-25T05:43:51,492 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8ef925b832e3,37013,1732513425956 2024-11-25T05:43:51,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:51,492 INFO [RS:0;8ef925b832e3:37013 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:51,493 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8ef925b832e3,37013,1732513425956] 2024-11-25T05:43:51,494 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8ef925b832e3,37013,1732513425956 already deleted, retry=false 2024-11-25T05:43:51,494 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8ef925b832e3,37013,1732513425956 expired; onlineServers=0 2024-11-25T05:43:51,494 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8ef925b832e3,46037,1732513425040' ***** 2024-11-25T05:43:51,494 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T05:43:51,494 INFO [M:0;8ef925b832e3:46037 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:51,494 INFO [M:0;8ef925b832e3:46037 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:51,494 DEBUG [M:0;8ef925b832e3:46037 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T05:43:51,494 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T05:43:51,494 DEBUG [M:0;8ef925b832e3:46037 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T05:43:51,494 DEBUG [master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.large.0-1732513427433 {}] cleaner.HFileCleaner(306): Exit Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.large.0-1732513427433,5,FailOnTimeoutGroup] 2024-11-25T05:43:51,494 DEBUG [master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.small.0-1732513427438 {}] cleaner.HFileCleaner(306): Exit Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.small.0-1732513427438,5,FailOnTimeoutGroup] 2024-11-25T05:43:51,495 INFO [M:0;8ef925b832e3:46037 {}] hbase.ChoreService(370): Chore service for: master/8ef925b832e3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:51,495 INFO [M:0;8ef925b832e3:46037 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:51,495 DEBUG [M:0;8ef925b832e3:46037 {}] master.HMaster(1795): Stopping service threads 2024-11-25T05:43:51,495 INFO [M:0;8ef925b832e3:46037 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T05:43:51,495 INFO [M:0;8ef925b832e3:46037 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T05:43:51,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:51,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:51,496 INFO [M:0;8ef925b832e3:46037 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T05:43:51,496 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T05:43:51,497 DEBUG [M:0;8ef925b832e3:46037 {}] zookeeper.ZKUtil(347): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T05:43:51,497 WARN [M:0;8ef925b832e3:46037 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T05:43:51,498 INFO [M:0;8ef925b832e3:46037 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/.lastflushedseqids 2024-11-25T05:43:51,508 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,509 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-25T05:43:51,518 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:48864 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45471:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48864 dst: /127.0.0.1:45471 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-25T05:43:51,523 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:51,523 INFO [M:0;8ef925b832e3:46037 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T05:43:51,523 INFO [M:0;8ef925b832e3:46037 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T05:43:51,524 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T05:43:51,524 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:51,524 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:51,524 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T05:43:51,524 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T05:43:51,524 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-25T05:43:51,545 DEBUG [M:0;8ef925b832e3:46037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a7be43ce7c9f4de0b56dcad50bfc4574 is 82, key is hbase:meta,,1/info:regioninfo/1732513428904/Put/seqid=0 2024-11-25T05:43:51,547 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,547 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:54288 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46773:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54288 dst: /127.0.0.1:46773 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-25T05:43:51,555 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-25T05:43:51,556 INFO [M:0;8ef925b832e3:46037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a7be43ce7c9f4de0b56dcad50bfc4574 2024-11-25T05:43:51,583 DEBUG [M:0;8ef925b832e3:46037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/64b7277b444c4ca2a4742c7be9841658 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732513429807/Put/seqid=0 2024-11-25T05:43:51,586 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,586 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:54804 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:45395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54804 dst: /127.0.0.1:45395 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T05:43:51,593 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:51,593 INFO [RS:0;8ef925b832e3:37013 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:51,593 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37013-0x10075683fbb0001, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:51,593 INFO [RS:0;8ef925b832e3:37013 {}] regionserver.HRegionServer(1031): Exiting; stopping=8ef925b832e3,37013,1732513425956; zookeeper connection closed. 2024-11-25T05:43:51,593 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4de326e3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4de326e3 2024-11-25T05:43:51,594 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-25T05:43:51,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-25T05:43:51,597 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:51,597 INFO [M:0;8ef925b832e3:46037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/64b7277b444c4ca2a4742c7be9841658 2024-11-25T05:43:51,627 DEBUG [M:0;8ef925b832e3:46037 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/932e6792f6284da1936f03c9e4e40fcb is 69, key is 8ef925b832e3,36001,1732513426136/rs:state/1732513427586/Put/seqid=0 2024-11-25T05:43:51,630 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,630 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-25T05:43:51,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1486503188_22 at /127.0.0.1:54820 [Receiving block BP-552640952-172.17.0.2-1732513421321:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:45395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54820 dst: /127.0.0.1:45395 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T05:43:51,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-25T05:43:51,638 WARN [M:0;8ef925b832e3:46037 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-25T05:43:51,638 INFO [M:0;8ef925b832e3:46037 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/932e6792f6284da1936f03c9e4e40fcb 2024-11-25T05:43:51,648 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a7be43ce7c9f4de0b56dcad50bfc4574 as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a7be43ce7c9f4de0b56dcad50bfc4574 2024-11-25T05:43:51,657 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a7be43ce7c9f4de0b56dcad50bfc4574, entries=8, sequenceid=72, filesize=5.5 K 2024-11-25T05:43:51,659 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/64b7277b444c4ca2a4742c7be9841658 as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/64b7277b444c4ca2a4742c7be9841658 2024-11-25T05:43:51,669 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/64b7277b444c4ca2a4742c7be9841658, entries=8, sequenceid=72, filesize=6.3 K 2024-11-25T05:43:51,671 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/932e6792f6284da1936f03c9e4e40fcb as hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/932e6792f6284da1936f03c9e4e40fcb 2024-11-25T05:43:51,680 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/932e6792f6284da1936f03c9e4e40fcb, entries=3, sequenceid=72, filesize=5.2 K 2024-11-25T05:43:51,681 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=72, compaction requested=false 2024-11-25T05:43:51,682 INFO [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:51,683 DEBUG [M:0;8ef925b832e3:46037 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732513431524Disabling compacts and flushes for region at 1732513431524Disabling writes for close at 1732513431524Obtaining lock to block concurrent updates at 1732513431524Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732513431524Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1732513431525 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732513431526 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732513431526Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732513431544 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732513431544Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732513431564 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732513431583 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732513431583Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732513431605 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732513431627 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732513431627Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b52a572: reopening flushed file at 1732513431647 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b36276e: reopening flushed file at 1732513431657 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a5b63eb: reopening flushed file at 1732513431669 (+12 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=72, compaction requested=false at 1732513431681 (+12 ms)Writing region close event to WAL at 1732513431682 (+1 ms)Closed at 1732513431682 2024-11-25T05:43:51,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45395 is added to blk_1073741825_1011 (size=32683) 2024-11-25T05:43:51,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741825_1011 (size=32683) 2024-11-25T05:43:51,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45471 is added to blk_1073741825_1011 (size=32683) 2024-11-25T05:43:51,689 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T05:43:51,689 INFO [M:0;8ef925b832e3:46037 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T05:43:51,689 INFO [M:0;8ef925b832e3:46037 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46037 2024-11-25T05:43:51,690 INFO [M:0;8ef925b832e3:46037 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:51,791 INFO [M:0;8ef925b832e3:46037 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:51,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:51,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46037-0x10075683fbb0000, quorum=127.0.0.1:58462, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:51,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67fa62aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:51,799 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3e9f3a79{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:51,799 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:51,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41dce2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:51,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@435daa1b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:51,804 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T05:43:51,804 WARN [BP-552640952-172.17.0.2-1732513421321 heartbeating to localhost/127.0.0.1:44447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T05:43:51,804 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T05:43:51,804 WARN [BP-552640952-172.17.0.2-1732513421321 heartbeating to localhost/127.0.0.1:44447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-552640952-172.17.0.2-1732513421321 (Datanode Uuid 42412a7d-66f5-4914-bfcf-062eb08ff6f4) service to localhost/127.0.0.1:44447 2024-11-25T05:43:51,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data5/current/BP-552640952-172.17.0.2-1732513421321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:51,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data6/current/BP-552640952-172.17.0.2-1732513421321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:51,806 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T05:43:51,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41033a80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:51,809 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14721f03{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:51,809 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:51,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28ffdd72{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:51,809 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23e84c60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:51,810 WARN [BP-552640952-172.17.0.2-1732513421321 heartbeating to localhost/127.0.0.1:44447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T05:43:51,810 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T05:43:51,810 WARN [BP-552640952-172.17.0.2-1732513421321 heartbeating to localhost/127.0.0.1:44447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-552640952-172.17.0.2-1732513421321 (Datanode Uuid 6d73bc6f-b45e-4f9c-b1a3-bff50e110552) service to localhost/127.0.0.1:44447 2024-11-25T05:43:51,810 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T05:43:51,811 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data3/current/BP-552640952-172.17.0.2-1732513421321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:51,811 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data4/current/BP-552640952-172.17.0.2-1732513421321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:51,812 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T05:43:51,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6bf2c732{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:51,814 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1182e874{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:51,814 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:51,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a906869{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:51,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7728820b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:51,816 WARN [BP-552640952-172.17.0.2-1732513421321 heartbeating to localhost/127.0.0.1:44447 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T05:43:51,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T05:43:51,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T05:43:51,816 WARN [BP-552640952-172.17.0.2-1732513421321 heartbeating to localhost/127.0.0.1:44447 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-552640952-172.17.0.2-1732513421321 (Datanode Uuid 77c34a3a-8275-40ed-9179-217c23bddd81) service to localhost/127.0.0.1:44447 2024-11-25T05:43:51,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data1/current/BP-552640952-172.17.0.2-1732513421321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:51,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/cluster_d87f5d05-c83e-133c-4d2e-6def63a8a3b5/data/data2/current/BP-552640952-172.17.0.2-1732513421321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:51,817 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T05:43:51,826 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e22261{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T05:43:51,827 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3599471c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:51,827 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:51,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2faf2775{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:51,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e18bd18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:51,837 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T05:43:51,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T05:43:51,881 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 157), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=398 (was 407), ProcessCount=11 (was 11), AvailableMemoryMB=7772 (was 8070) 2024-11-25T05:43:51,887 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=398, ProcessCount=11, AvailableMemoryMB=7772 2024-11-25T05:43:51,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T05:43:51,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.log.dir so I do NOT create it in target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace 2024-11-25T05:43:51,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3c591b14-b191-3b6b-4bdf-d83287d1ec46/hadoop.tmp.dir so I do NOT create it in target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace 2024-11-25T05:43:51,888 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e, deleteOnExit=true 2024-11-25T05:43:51,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T05:43:51,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/test.cache.data in system properties and HBase conf 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir in system properties and HBase conf 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T05:43:51,889 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T05:43:51,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/nfs.dump.dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/java.io.tmpdir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T05:43:51,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T05:43:51,891 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T05:43:51,958 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T05:43:51,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T05:43:51,969 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T05:43:51,970 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T05:43:51,970 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T05:43:51,971 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T05:43:51,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47444538{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,AVAILABLE} 2024-11-25T05:43:51,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14e12329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T05:43:52,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53bceb71{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/java.io.tmpdir/jetty-localhost-45195-hadoop-hdfs-3_4_1-tests_jar-_-any-5114284076592011441/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T05:43:52,086 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e67c333{HTTP/1.1, (http/1.1)}{localhost:45195} 2024-11-25T05:43:52,086 INFO [Time-limited test {}] server.Server(415): Started @13027ms 2024-11-25T05:43:52,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T05:43:52,170 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T05:43:52,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T05:43:52,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T05:43:52,171 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T05:43:52,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a678bb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,AVAILABLE} 2024-11-25T05:43:52,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@195bb277{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T05:43:52,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@538c111d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/java.io.tmpdir/jetty-localhost-42733-hadoop-hdfs-3_4_1-tests_jar-_-any-16848162782101707486/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:52,282 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@431cf7a{HTTP/1.1, (http/1.1)}{localhost:42733} 2024-11-25T05:43:52,282 INFO [Time-limited test {}] server.Server(415): Started @13223ms 2024-11-25T05:43:52,284 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T05:43:52,336 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T05:43:52,341 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T05:43:52,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T05:43:52,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T05:43:52,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T05:43:52,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3186c14b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,AVAILABLE} 2024-11-25T05:43:52,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f5ab69f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T05:43:52,364 WARN [Thread-537 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data1/current/BP-955224312-172.17.0.2-1732513431917/current, will proceed with Du for space computation calculation, 2024-11-25T05:43:52,364 WARN [Thread-538 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data2/current/BP-955224312-172.17.0.2-1732513431917/current, will proceed with Du for space computation calculation, 2024-11-25T05:43:52,386 WARN [Thread-516 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T05:43:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e3b0c3a69e6b520 with lease ID 0xd7644bf2a08c4431: Processing first storage report for DS-bf0b8286-2e8e-4c5f-b4bd-a4536c7ba7c5 from datanode DatanodeRegistration(127.0.0.1:35497, datanodeUuid=64ffed0a-e056-4116-9c84-c4f717469ba6, infoPort=35033, infoSecurePort=0, ipcPort=43965, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917) 2024-11-25T05:43:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e3b0c3a69e6b520 with lease ID 0xd7644bf2a08c4431: from storage DS-bf0b8286-2e8e-4c5f-b4bd-a4536c7ba7c5 node DatanodeRegistration(127.0.0.1:35497, datanodeUuid=64ffed0a-e056-4116-9c84-c4f717469ba6, infoPort=35033, infoSecurePort=0, ipcPort=43965, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T05:43:52,390 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6e3b0c3a69e6b520 with lease ID 0xd7644bf2a08c4431: Processing first storage report for DS-059403e1-712d-4ee8-b95d-1b568fefd6f0 from datanode DatanodeRegistration(127.0.0.1:35497, datanodeUuid=64ffed0a-e056-4116-9c84-c4f717469ba6, infoPort=35033, infoSecurePort=0, ipcPort=43965, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917) 2024-11-25T05:43:52,390 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e3b0c3a69e6b520 with lease ID 0xd7644bf2a08c4431: from storage DS-059403e1-712d-4ee8-b95d-1b568fefd6f0 node DatanodeRegistration(127.0.0.1:35497, datanodeUuid=64ffed0a-e056-4116-9c84-c4f717469ba6, infoPort=35033, infoSecurePort=0, ipcPort=43965, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T05:43:52,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@143db790{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/java.io.tmpdir/jetty-localhost-35835-hadoop-hdfs-3_4_1-tests_jar-_-any-8945294257182550143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:52,462 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f2822e1{HTTP/1.1, (http/1.1)}{localhost:35835} 2024-11-25T05:43:52,462 INFO [Time-limited test {}] server.Server(415): Started @13403ms 2024-11-25T05:43:52,464 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T05:43:52,503 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T05:43:52,507 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T05:43:52,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T05:43:52,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T05:43:52,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T05:43:52,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f0b4cbc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,AVAILABLE} 2024-11-25T05:43:52,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2590be83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T05:43:52,534 WARN [Thread-572 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data3/current/BP-955224312-172.17.0.2-1732513431917/current, will proceed with Du for space computation calculation, 2024-11-25T05:43:52,534 WARN [Thread-573 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data4/current/BP-955224312-172.17.0.2-1732513431917/current, will proceed with Du for space computation calculation, 2024-11-25T05:43:52,554 WARN [Thread-552 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T05:43:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x301cc2e232ffac40 with lease ID 0xd7644bf2a08c4432: Processing first storage report for DS-36eb4cd7-e7e0-405c-8ed3-aa0afc8a1165 from datanode DatanodeRegistration(127.0.0.1:39287, datanodeUuid=1c33aa96-25f5-42bf-a766-4cb708c1ee1d, infoPort=43851, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917) 2024-11-25T05:43:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x301cc2e232ffac40 with lease ID 0xd7644bf2a08c4432: from storage DS-36eb4cd7-e7e0-405c-8ed3-aa0afc8a1165 node DatanodeRegistration(127.0.0.1:39287, datanodeUuid=1c33aa96-25f5-42bf-a766-4cb708c1ee1d, infoPort=43851, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T05:43:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x301cc2e232ffac40 with lease ID 0xd7644bf2a08c4432: Processing first storage report for DS-b07fceea-f332-46e7-b87f-829c10e8de64 from datanode DatanodeRegistration(127.0.0.1:39287, datanodeUuid=1c33aa96-25f5-42bf-a766-4cb708c1ee1d, infoPort=43851, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917) 2024-11-25T05:43:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x301cc2e232ffac40 with lease ID 0xd7644bf2a08c4432: from storage DS-b07fceea-f332-46e7-b87f-829c10e8de64 node DatanodeRegistration(127.0.0.1:39287, datanodeUuid=1c33aa96-25f5-42bf-a766-4cb708c1ee1d, infoPort=43851, infoSecurePort=0, ipcPort=39921, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T05:43:52,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12ef114e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/java.io.tmpdir/jetty-localhost-33915-hadoop-hdfs-3_4_1-tests_jar-_-any-16654883400552467991/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:52,628 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2123257e{HTTP/1.1, (http/1.1)}{localhost:33915} 2024-11-25T05:43:52,628 INFO [Time-limited test {}] server.Server(415): Started @13569ms 2024-11-25T05:43:52,630 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-25T05:43:52,715 WARN [Thread-599 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data6/current/BP-955224312-172.17.0.2-1732513431917/current, will proceed with Du for space computation calculation, 2024-11-25T05:43:52,715 WARN [Thread-598 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data5/current/BP-955224312-172.17.0.2-1732513431917/current, will proceed with Du for space computation calculation, 2024-11-25T05:43:52,738 WARN [Thread-587 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T05:43:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3195e505e6221079 with lease ID 0xd7644bf2a08c4433: Processing first storage report for DS-0e2fb7d6-7ac0-4ed0-8014-2e79b62b6805 from datanode DatanodeRegistration(127.0.0.1:37875, datanodeUuid=29cf785c-8621-456f-b81b-ae9ff45b7baf, infoPort=36357, infoSecurePort=0, ipcPort=34629, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917) 2024-11-25T05:43:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3195e505e6221079 with lease ID 0xd7644bf2a08c4433: from storage DS-0e2fb7d6-7ac0-4ed0-8014-2e79b62b6805 node DatanodeRegistration(127.0.0.1:37875, datanodeUuid=29cf785c-8621-456f-b81b-ae9ff45b7baf, infoPort=36357, infoSecurePort=0, ipcPort=34629, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T05:43:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3195e505e6221079 with lease ID 0xd7644bf2a08c4433: Processing first storage report for DS-3cb6d281-553d-4c0f-a40c-d834e87dfb17 from datanode DatanodeRegistration(127.0.0.1:37875, datanodeUuid=29cf785c-8621-456f-b81b-ae9ff45b7baf, infoPort=36357, infoSecurePort=0, ipcPort=34629, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917) 2024-11-25T05:43:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3195e505e6221079 with lease ID 0xd7644bf2a08c4433: from storage DS-3cb6d281-553d-4c0f-a40c-d834e87dfb17 node DatanodeRegistration(127.0.0.1:37875, datanodeUuid=29cf785c-8621-456f-b81b-ae9ff45b7baf, infoPort=36357, infoSecurePort=0, ipcPort=34629, storageInfo=lv=-57;cid=testClusterID;nsid=580101726;c=1732513431917), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T05:43:52,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace 2024-11-25T05:43:52,802 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/zookeeper_0, clientPort=57148, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T05:43:52,803 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57148 2024-11-25T05:43:52,804 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,805 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741825_1001 (size=7) 2024-11-25T05:43:52,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741825_1001 (size=7) 2024-11-25T05:43:52,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741825_1001 (size=7) 2024-11-25T05:43:52,825 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe with version=8 2024-11-25T05:43:52,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44447/user/jenkins/test-data/0e3b86bf-dade-77de-71d1-bc8ac9453c10/hbase-staging 2024-11-25T05:43:52,828 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8ef925b832e3:0 server-side Connection retries=45 2024-11-25T05:43:52,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,828 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,828 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T05:43:52,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T05:43:52,829 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T05:43:52,829 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T05:43:52,833 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37847 2024-11-25T05:43:52,835 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37847 connecting to ZooKeeper ensemble=127.0.0.1:57148 2024-11-25T05:43:52,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:378470x0, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T05:43:52,841 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37847-0x100756861500000 connected 2024-11-25T05:43:52,860 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,862 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:52,864 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe, hbase.cluster.distributed=false 2024-11-25T05:43:52,866 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T05:43:52,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37847 2024-11-25T05:43:52,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37847 2024-11-25T05:43:52,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37847 2024-11-25T05:43:52,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37847 2024-11-25T05:43:52,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37847 2024-11-25T05:43:52,897 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8ef925b832e3:0 server-side Connection retries=45 2024-11-25T05:43:52,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,897 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T05:43:52,897 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T05:43:52,897 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T05:43:52,898 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T05:43:52,898 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36683 2024-11-25T05:43:52,900 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36683 connecting to ZooKeeper ensemble=127.0.0.1:57148 2024-11-25T05:43:52,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,904 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,909 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:366830x0, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T05:43:52,910 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:366830x0, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:52,910 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T05:43:52,913 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36683-0x100756861500001 connected 2024-11-25T05:43:52,914 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T05:43:52,915 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T05:43:52,916 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T05:43:52,916 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36683 2024-11-25T05:43:52,917 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36683 2024-11-25T05:43:52,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36683 2024-11-25T05:43:52,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36683 2024-11-25T05:43:52,929 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36683 2024-11-25T05:43:52,948 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8ef925b832e3:0 server-side Connection retries=45 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T05:43:52,948 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T05:43:52,949 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40473 2024-11-25T05:43:52,951 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40473 connecting to ZooKeeper ensemble=127.0.0.1:57148 2024-11-25T05:43:52,952 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:52,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404730x0, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T05:43:52,964 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40473-0x100756861500002 connected 2024-11-25T05:43:52,965 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:52,965 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T05:43:52,970 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T05:43:52,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-11-25T05:43:52,972 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T05:43:52,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40473 2024-11-25T05:43:52,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40473 2024-11-25T05:43:52,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40473 2024-11-25T05:43:53,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40473 2024-11-25T05:43:53,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40473 2024-11-25T05:43:53,020 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8ef925b832e3:0 server-side Connection retries=45 2024-11-25T05:43:53,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:53,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:53,020 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T05:43:53,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T05:43:53,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T05:43:53,020 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T05:43:53,021 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T05:43:53,021 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41569 2024-11-25T05:43:53,023 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41569 connecting to ZooKeeper ensemble=127.0.0.1:57148 2024-11-25T05:43:53,024 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:53,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:53,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:415690x0, quorum=127.0.0.1:57148, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T05:43:53,030 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41569-0x100756861500003 connected 2024-11-25T05:43:53,032 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:53,032 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T05:43:53,041 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T05:43:53,042 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T05:43:53,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T05:43:53,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41569 2024-11-25T05:43:53,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41569 2024-11-25T05:43:53,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41569 2024-11-25T05:43:53,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41569 2024-11-25T05:43:53,044 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41569 2024-11-25T05:43:53,057 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8ef925b832e3:37847 2024-11-25T05:43:53,057 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,058 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,059 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37847-0x100756861500000, 
quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:53,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:53,061 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:53,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,061 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,061 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T05:43:53,062 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8ef925b832e3,37847,1732513432828 from backup master directory 2024-11-25T05:43:53,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,063 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,063 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T05:43:53,063 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T05:43:53,075 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/hbase.id] with ID: cc9a464c-1759-40e8-8987-db6d6de54ba7 2024-11-25T05:43:53,075 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/.tmp/hbase.id 2024-11-25T05:43:53,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741826_1002 (size=42) 2024-11-25T05:43:53,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741826_1002 (size=42) 2024-11-25T05:43:53,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741826_1002 (size=42) 2024-11-25T05:43:53,099 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/.tmp/hbase.id]:[hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/hbase.id] 2024-11-25T05:43:53,119 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T05:43:53,119 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T05:43:53,121 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-25T05:43:53,123 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741827_1003 (size=196) 2024-11-25T05:43:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741827_1003 (size=196) 2024-11-25T05:43:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741827_1003 (size=196) 2024-11-25T05:43:53,137 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T05:43:53,138 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T05:43:53,138 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T05:43:53,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is 
added to blk_1073741828_1004 (size=1189) 2024-11-25T05:43:53,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741828_1004 (size=1189) 2024-11-25T05:43:53,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741828_1004 (size=1189) 2024-11-25T05:43:53,158 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store 2024-11-25T05:43:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741829_1005 (size=34) 2024-11-25T05:43:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741829_1005 (size=34) 2024-11-25T05:43:53,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741829_1005 (size=34) 2024-11-25T05:43:53,170 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:53,170 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T05:43:53,170 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:53,170 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T05:43:53,170 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T05:43:53,170 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:53,170 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:53,170 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732513433170Disabling compacts and flushes for region at 1732513433170Disabling writes for close at 1732513433170Writing region close event to WAL at 1732513433170Closed at 1732513433170 2024-11-25T05:43:53,172 WARN [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/.initializing 2024-11-25T05:43:53,172 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/WALs/8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,177 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C37847%2C1732513432828, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/WALs/8ef925b832e3,37847,1732513432828, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/oldWALs, maxLogs=10 2024-11-25T05:43:53,178 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8ef925b832e3%2C37847%2C1732513432828.1732513433177 2024-11-25T05:43:53,198 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/WALs/8ef925b832e3,37847,1732513432828/8ef925b832e3%2C37847%2C1732513432828.1732513433177 2024-11-25T05:43:53,205 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36357:36357),(127.0.0.1/127.0.0.1:43851:43851),(127.0.0.1/127.0.0.1:35033:35033)] 2024-11-25T05:43:53,207 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T05:43:53,207 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:53,207 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,207 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T05:43:53,212 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T05:43:53,215 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:53,216 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T05:43:53,219 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:53,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T05:43:53,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:53,223 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,224 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,224 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,226 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,226 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,227 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T05:43:53,228 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T05:43:53,238 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T05:43:53,239 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68256900, jitterRate=0.017107069492340088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T05:43:53,240 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732513433208Initializing all the Stores at 1732513433209 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433209Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513433209Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513433209Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513433209Cleaning up temporary data from old regions at 1732513433226 (+17 ms)Region opened successfully at 1732513433240 (+14 ms) 2024-11-25T05:43:53,240 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T05:43:53,246 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@398499fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:53,247 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T05:43:53,247 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T05:43:53,247 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T05:43:53,247 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T05:43:53,248 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T05:43:53,249 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T05:43:53,249 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T05:43:53,251 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-25T05:43:53,252 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T05:43:53,253 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T05:43:53,254 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T05:43:53,255 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T05:43:53,256 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T05:43:53,256 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T05:43:53,258 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T05:43:53,259 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T05:43:53,260 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T05:43:53,261 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T05:43:53,264 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T05:43:53,264 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T05:43:53,266 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,266 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-25T05:43:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,267 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8ef925b832e3,37847,1732513432828, sessionid=0x100756861500000, setting cluster-up flag (Was=false) 2024-11-25T05:43:53,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:53,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,269 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,273 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T05:43:53,274 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,277 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,281 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] 
procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T05:43:53,282 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8ef925b832e3,37847,1732513432828 2024-11-25T05:43:53,284 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T05:43:53,286 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:53,287 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T05:43:53,287 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T05:43:53,287 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8ef925b832e3,37847,1732513432828 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T05:43:53,288 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:53,288 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:53,289 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:53,289 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8ef925b832e3:0, corePoolSize=5, maxPoolSize=5 2024-11-25T05:43:53,289 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8ef925b832e3:0, corePoolSize=10, maxPoolSize=10 2024-11-25T05:43:53,289 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,289 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8ef925b832e3:0, 
corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:53,289 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,299 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:53,299 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T05:43:53,301 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,301 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T05:43:53,301 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732513463301 2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 
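
The hbase:meta descriptor dumped above is generated internally by FSTableDescriptors during bootstrap. For comparison, a minimal sketch of how an equivalent column-family layout could be declared through the public client API (class and method names as in the HBase 2.x client; the table name demo:example and the wrapper class are placeholders, not part of this test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family settings shown in the log: 3 versions,
            // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks, in-memory.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .build())
                .build();
        }
    }
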
2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T05:43:53,302 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,303 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T05:43:53,303 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T05:43:53,303 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T05:43:53,303 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T05:43:53,303 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T05:43:53,304 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.large.0-1732513433303,5,FailOnTimeoutGroup] 2024-11-25T05:43:53,304 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.small.0-1732513433304,5,FailOnTimeoutGroup] 2024-11-25T05:43:53,304 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,304 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T05:43:53,304 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,304 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
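
The LogsCleaner and HFileCleaner entries above are ScheduledChore instances driven by a ChoreService, each with the period and unit that is logged. A rough sketch of that pattern with a custom chore, assuming the HBase 2.x shapes of ScheduledChore(name, stopper, periodMillis) and ChoreService.scheduleChore; the chore body and names are invented for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            // Trivial Stoppable so the chore has a lifecycle owner.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // Runs every 600000 ms, like the LogsCleaner/HFileCleaner chores above.
            ScheduledChore cleaner = new ScheduledChore("DemoCleaner", stopper, 600000) {
                @Override protected void chore() {
                    System.out.println("cleaning old files...");
                }
            };
            ChoreService service = new ChoreService("demo");
            service.scheduleChore(cleaner);
            Thread.sleep(5_000);
            service.shutdown();
        }
    }
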
2024-11-25T05:43:53,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741831_1007 (size=1321) 2024-11-25T05:43:53,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741831_1007 (size=1321) 2024-11-25T05:43:53,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741831_1007 (size=1321) 2024-11-25T05:43:53,322 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T05:43:53,322 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe 2024-11-25T05:43:53,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741832_1008 (size=32) 2024-11-25T05:43:53,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741832_1008 (size=32) 2024-11-25T05:43:53,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741832_1008 (size=32) 2024-11-25T05:43:53,339 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:53,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T05:43:53,347 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(746): ClusterId : cc9a464c-1759-40e8-8987-db6d6de54ba7 2024-11-25T05:43:53,347 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T05:43:53,347 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(746): ClusterId : cc9a464c-1759-40e8-8987-db6d6de54ba7 2024-11-25T05:43:53,347 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T05:43:53,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T05:43:53,348 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,349 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T05:43:53,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,349 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T05:43:53,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T05:43:53,351 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T05:43:53,351 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T05:43:53,351 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,351 DEBUG [RS:0;8ef925b832e3:36683 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ca52dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:53,351 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T05:43:53,351 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T05:43:53,352 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(746): ClusterId : cc9a464c-1759-40e8-8987-db6d6de54ba7 2024-11-25T05:43:53,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,352 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T05:43:53,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T05:43:53,354 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T05:43:53,354 DEBUG [RS:1;8ef925b832e3:40473 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cbedcda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:53,354 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T05:43:53,354 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,355 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T05:43:53,355 DEBUG [RS:2;8ef925b832e3:41569 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T05:43:53,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T05:43:53,358 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T05:43:53,358 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,359 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T05:43:53,359 DEBUG [RS:2;8ef925b832e3:41569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27abcaca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8ef925b832e3/172.17.0.2:0 2024-11-25T05:43:53,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,362 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T05:43:53,363 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740 2024-11-25T05:43:53,364 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740 2024-11-25T05:43:53,366 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T05:43:53,366 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T05:43:53,367 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
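
The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so the lower bound falls back to the region memstore flush size divided by the number of families (32 MB here). A sketch of setting it explicitly on the client Configuration; the 16 MB value is an arbitrary example, and the flush-size key is quoted from memory of hbase-default.xml, so verify it against your version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushPolicyConfigSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Overall memstore flush threshold per region (128 MB is the usual default).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Per-column-family lower bound referenced in the log message above; when it is
            // absent, FlushLargeStoresPolicy divides the flush size by the number of families.
            conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
            return conf;
        }
    }
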
2024-11-25T05:43:53,369 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T05:43:53,369 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8ef925b832e3:36683 2024-11-25T05:43:53,369 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T05:43:53,369 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T05:43:53,369 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T05:43:53,370 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,37847,1732513432828 with port=36683, startcode=1732513432896 2024-11-25T05:43:53,373 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;8ef925b832e3:40473 2024-11-25T05:43:53,373 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T05:43:53,373 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T05:43:53,373 DEBUG [RS:0;8ef925b832e3:36683 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T05:43:53,373 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T05:43:53,374 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,37847,1732513432828 with port=40473, startcode=1732513432947 2024-11-25T05:43:53,374 DEBUG [RS:1;8ef925b832e3:40473 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T05:43:53,376 DEBUG [RS:2;8ef925b832e3:41569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;8ef925b832e3:41569 2024-11-25T05:43:53,376 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T05:43:53,376 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T05:43:53,376 DEBUG [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(832): About to register with Master. 
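
The ShutdownHook entries show each region server installing a JVM shutdown hook before registering with the master, so cleanup runs even on abrupt exit. Stripped of HBase's wrapper, the underlying JVM mechanism looks like this (the hook body and thread name are placeholders):

    public class ShutdownHookSketch {
        public static void main(String[] args) {
            // Register a hook that the JVM runs during normal shutdown or SIGTERM.
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                System.out.println("flushing and closing resources before exit");
            }, "demo-shutdown-hook"));
            System.out.println("running; hook fires on JVM exit");
        }
    }
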
2024-11-25T05:43:53,377 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(2659): reportForDuty to master=8ef925b832e3,37847,1732513432828 with port=41569, startcode=1732513433019 2024-11-25T05:43:53,377 DEBUG [RS:2;8ef925b832e3:41569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T05:43:53,379 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T05:43:53,380 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72888281, jitterRate=0.08612002432346344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T05:43:53,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732513433339Initializing all the Stores at 1732513433341 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433341Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433345 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513433345Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433345Cleaning up temporary data from old regions at 1732513433366 (+21 ms)Region opened successfully at 1732513433381 (+15 ms) 2024-11-25T05:43:53,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T05:43:53,381 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T05:43:53,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T05:43:53,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T05:43:53,381 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T05:43:53,382 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
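
The region open entry above reports a SteppingSplitPolicy wrapping a ConstantSizeRegionSplitPolicy with a jittered desiredMaxFileSize. For a user table the same knobs can be overridden per table; a sketch with the HBase 2.x TableDescriptorBuilder (the table name and 64 MB size are illustrative, and a real table would also need at least one column family as in the earlier descriptor sketch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "split_example"))
                // Regions split once a store grows past this size (before jitter).
                .setMaxFileSize(64L * 1024 * 1024)
                // Pin the split policy instead of the globally configured default.
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
                .build();
        }
    }
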
2024-11-25T05:43:53,382 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732513433381Disabling compacts and flushes for region at 1732513433381Disabling writes for close at 1732513433381Writing region close event to WAL at 1732513433382 (+1 ms)Closed at 1732513433382 2024-11-25T05:43:53,383 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53099, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T05:43:53,384 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37847 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,384 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37847 {}] master.ServerManager(517): Registering regionserver=8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,384 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42497, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T05:43:53,384 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:53,384 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T05:43:53,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T05:43:53,384 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T05:43:53,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T05:43:53,386 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37847 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8ef925b832e3,41569,1732513433019 2024-11-25T05:43:53,386 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe 2024-11-25T05:43:53,386 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37847 {}] master.ServerManager(517): Registering regionserver=8ef925b832e3,41569,1732513433019 2024-11-25T05:43:53,386 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39387 2024-11-25T05:43:53,386 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T05:43:53,388 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T05:43:53,388 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37847 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer 8ef925b832e3,36683,1732513432896 2024-11-25T05:43:53,388 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37847 {}] master.ServerManager(517): Registering regionserver=8ef925b832e3,36683,1732513432896 2024-11-25T05:43:53,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:53,389 DEBUG [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe 2024-11-25T05:43:53,390 DEBUG [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39387 2024-11-25T05:43:53,390 DEBUG [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T05:43:53,391 DEBUG [RS:1;8ef925b832e3:40473 {}] zookeeper.ZKUtil(111): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,391 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8ef925b832e3,40473,1732513432947] 2024-11-25T05:43:53,391 WARN [RS:1;8ef925b832e3:40473 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T05:43:53,391 INFO [RS:1;8ef925b832e3:40473 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T05:43:53,391 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:53,393 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe 2024-11-25T05:43:53,393 DEBUG [RS:2;8ef925b832e3:41569 {}] zookeeper.ZKUtil(111): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8ef925b832e3,41569,1732513433019 2024-11-25T05:43:53,393 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39387 2024-11-25T05:43:53,393 WARN [RS:2;8ef925b832e3:41569 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
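
The RegionServerTracker entries show servers registering ephemeral znodes under /hbase/rs and the master reacting to NodeChildrenChanged events on that path. The raw ZooKeeper calls behind that pattern, using the quorum address from the log; the server name demo-host,16020,0 is made up, and the parent znode must already exist with suitable ACLs:

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZNodeSketch {
        public static void main(String[] args) throws Exception {
            // Quorum string as in the log; the watcher printout mimics what ZKWatcher logs.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:57148", 30_000,
                event -> System.out.println("Received ZooKeeper Event " + event));
            // Region servers register as ephemeral children of /hbase/rs, so the node
            // disappears automatically if the session dies.
            zk.create("/hbase/rs/demo-host,16020,0", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            // The master watches the children list to notice new or dead servers.
            List<String> servers = zk.getChildren("/hbase/rs", true);
            System.out.println("live servers: " + servers);
            zk.close();
        }
    }
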
2024-11-25T05:43:53,393 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T05:43:53,393 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8ef925b832e3,41569,1732513433019] 2024-11-25T05:43:53,393 INFO [RS:2;8ef925b832e3:41569 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T05:43:53,393 DEBUG [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,41569,1732513433019 2024-11-25T05:43:53,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:53,395 DEBUG [RS:0;8ef925b832e3:36683 {}] zookeeper.ZKUtil(111): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8ef925b832e3,36683,1732513432896 2024-11-25T05:43:53,395 WARN [RS:0;8ef925b832e3:36683 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T05:43:53,395 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8ef925b832e3,36683,1732513432896] 2024-11-25T05:43:53,395 INFO [RS:0;8ef925b832e3:36683 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T05:43:53,395 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,36683,1732513432896 2024-11-25T05:43:53,398 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T05:43:53,401 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T05:43:53,402 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T05:43:53,402 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T05:43:53,405 INFO [RS:2;8ef925b832e3:41569 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T05:43:53,405 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
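
The MemStoreFlusher and PressureAwareCompactionThroughputController lines report derived limits (880 M global memstore with an 836 M low-water mark, and 100/50 MB/s compaction bounds). A sketch of the configuration keys those limits come from; the key names are quoted from memory of hbase-default.xml and should be checked against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputConfigSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Global memstore limit as a fraction of heap; the low-water mark in the log
            // (836 M of 880 M) matches the default 0.95 lower-limit factor.
            conf.setDouble("hbase.regionserver.global.memstore.size", 0.4);
            conf.setDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95);
            // Compaction throughput bounds matching the 100 MB/s and 50 MB/s values logged.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }
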
2024-11-25T05:43:53,409 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T05:43:53,409 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T05:43:53,410 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T05:43:53,411 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T05:43:53,411 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,411 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,411 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,411 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,411 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,411 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,412 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:53,413 DEBUG [RS:2;8ef925b832e3:41569 {}] executor.ExecutorService(95): Starting executor service 
name=RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:53,417 INFO [RS:1;8ef925b832e3:40473 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T05:43:53,417 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,418 INFO [RS:0;8ef925b832e3:36683 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T05:43:53,418 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,419 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T05:43:53,420 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T05:43:53,420 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,420 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,420 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,420 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:53,421 DEBUG [RS:1;8ef925b832e3:40473 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:53,423 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T05:43:53,424 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T05:43:53,424 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,425 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,425 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,425 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,425 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,425 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,425 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,41569,1732513433019-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8ef925b832e3:0, corePoolSize=2, maxPoolSize=2 2024-11-25T05:43:53,425 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,426 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,426 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,426 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,426 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,429 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,429 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,429 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,429 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8ef925b832e3:0, corePoolSize=1, maxPoolSize=1 2024-11-25T05:43:53,429 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,429 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,429 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:53,429 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,40473,1732513432947-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:53,430 DEBUG [RS:0;8ef925b832e3:36683 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0, corePoolSize=3, maxPoolSize=3 2024-11-25T05:43:53,449 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,449 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,450 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
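
The ExecutorService entries start one named pool per operation type with fixed core and max sizes. A plain JDK sketch of that shape, not HBase's own executor.ExecutorService implementation; note that with an unbounded queue the max size beyond core never engages, which is consistent with the equal core/max values logged:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class ExecutorPoolSketch {
        public static ThreadPoolExecutor build(String name, int corePoolSize, int maxPoolSize) {
            AtomicInteger counter = new AtomicInteger();
            // Fixed-size pool with named threads and an unbounded work queue.
            return new ThreadPoolExecutor(corePoolSize, maxPoolSize,
                60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
                r -> new Thread(r, name + "-" + counter.incrementAndGet()));
        }

        public static void main(String[] args) {
            ThreadPoolExecutor pool = build("RS_OPEN_REGION-demo", 1, 1);
            pool.execute(() -> System.out.println("open region task"));
            pool.shutdown();
        }
    }
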
2024-11-25T05:43:53,450 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,450 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,450 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T05:43:53,450 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,36683,1732513432896-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:53,450 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,41569,1732513433019-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,450 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,450 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.Replication(171): 8ef925b832e3,41569,1732513433019 started 2024-11-25T05:43:53,461 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T05:43:53,462 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,40473,1732513432947-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,462 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,462 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.Replication(171): 8ef925b832e3,40473,1732513432947 started 2024-11-25T05:43:53,469 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T05:43:53,469 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,36683,1732513432896-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,469 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:53,469 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.Replication(171): 8ef925b832e3,36683,1732513432896 started 2024-11-25T05:43:53,472 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T05:43:53,473 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(1482): Serving as 8ef925b832e3,41569,1732513433019, RpcServer on 8ef925b832e3/172.17.0.2:41569, sessionid=0x100756861500003 2024-11-25T05:43:53,473 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T05:43:53,473 DEBUG [RS:2;8ef925b832e3:41569 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8ef925b832e3,41569,1732513433019 2024-11-25T05:43:53,473 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,41569,1732513433019' 2024-11-25T05:43:53,473 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T05:43:53,475 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T05:43:53,476 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T05:43:53,476 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T05:43:53,476 DEBUG [RS:2;8ef925b832e3:41569 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8ef925b832e3,41569,1732513433019 2024-11-25T05:43:53,476 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,41569,1732513433019' 2024-11-25T05:43:53,476 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T05:43:53,481 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T05:43:53,482 DEBUG [RS:2;8ef925b832e3:41569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T05:43:53,482 INFO [RS:2;8ef925b832e3:41569 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T05:43:53,482 INFO [RS:2;8ef925b832e3:41569 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T05:43:53,484 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
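
Once a region server logs "Serving as ...", it is visible to clients through the master. A hedged sketch of inspecting the resulting cluster from a client with the HBase 2.x Admin API, pointed at the ZooKeeper quorum shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum host and client port taken from the log above.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "57148");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master: " + metrics.getMasterName());
                metrics.getLiveServerMetrics().keySet()
                    .forEach(sn -> System.out.println("live region server: " + sn));
            }
        }
    }
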
2024-11-25T05:43:53,484 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1482): Serving as 8ef925b832e3,40473,1732513432947, RpcServer on 8ef925b832e3/172.17.0.2:40473, sessionid=0x100756861500002 2024-11-25T05:43:53,485 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T05:43:53,485 DEBUG [RS:1;8ef925b832e3:40473 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,485 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,40473,1732513432947' 2024-11-25T05:43:53,485 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T05:43:53,486 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T05:43:53,487 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T05:43:53,487 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T05:43:53,487 DEBUG [RS:1;8ef925b832e3:40473 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,487 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,40473,1732513432947' 2024-11-25T05:43:53,487 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T05:43:53,488 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T05:43:53,489 DEBUG [RS:1;8ef925b832e3:40473 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T05:43:53,489 INFO [RS:1;8ef925b832e3:40473 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T05:43:53,489 INFO [RS:1;8ef925b832e3:40473 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T05:43:53,489 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T05:43:53,490 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1482): Serving as 8ef925b832e3,36683,1732513432896, RpcServer on 8ef925b832e3/172.17.0.2:36683, sessionid=0x100756861500001 2024-11-25T05:43:53,490 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T05:43:53,490 DEBUG [RS:0;8ef925b832e3:36683 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8ef925b832e3,36683,1732513432896 2024-11-25T05:43:53,490 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,36683,1732513432896' 2024-11-25T05:43:53,490 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T05:43:53,491 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T05:43:53,491 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T05:43:53,491 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T05:43:53,491 DEBUG [RS:0;8ef925b832e3:36683 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8ef925b832e3,36683,1732513432896 2024-11-25T05:43:53,491 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8ef925b832e3,36683,1732513432896' 2024-11-25T05:43:53,491 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T05:43:53,492 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T05:43:53,492 DEBUG [RS:0;8ef925b832e3:36683 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T05:43:53,492 INFO [RS:0;8ef925b832e3:36683 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T05:43:53,492 INFO [RS:0;8ef925b832e3:36683 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T05:43:53,539 WARN [8ef925b832e3:37847 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
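The entries that follow show each region server rolling its first WAL with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A sketch of the configuration keys I believe drive those numbers (`hbase.regionserver.hlog.blocksize`, `hbase.regionserver.logroll.multiplier`, `hbase.regionserver.maxlogs`); this only illustrates how the roll size is derived as blocksize times multiplier, not the exact code path inside AbstractFSWAL.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizing {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys and defaults; with a 256 MB block size and a 0.5 multiplier the
        // roll size works out to the 128 MB reported in the log.
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (blockSize * multiplier);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
    }
}
```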
2024-11-25T05:43:53,585 INFO [RS:2;8ef925b832e3:41569 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C41569%2C1732513433019, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,41569,1732513433019, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs, maxLogs=32 2024-11-25T05:43:53,587 INFO [RS:2;8ef925b832e3:41569 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8ef925b832e3%2C41569%2C1732513433019.1732513433587 2024-11-25T05:43:53,592 INFO [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C40473%2C1732513432947, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,40473,1732513432947, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs, maxLogs=32 2024-11-25T05:43:53,593 INFO [RS:1;8ef925b832e3:40473 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8ef925b832e3%2C40473%2C1732513432947.1732513433593 2024-11-25T05:43:53,596 INFO [RS:0;8ef925b832e3:36683 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C36683%2C1732513432896, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,36683,1732513432896, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs, maxLogs=32 2024-11-25T05:43:53,597 INFO [RS:0;8ef925b832e3:36683 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8ef925b832e3%2C36683%2C1732513432896.1732513433596 2024-11-25T05:43:53,611 INFO [RS:2;8ef925b832e3:41569 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,41569,1732513433019/8ef925b832e3%2C41569%2C1732513433019.1732513433587 2024-11-25T05:43:53,626 DEBUG [RS:2;8ef925b832e3:41569 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35033:35033),(127.0.0.1/127.0.0.1:43851:43851),(127.0.0.1/127.0.0.1:36357:36357)] 2024-11-25T05:43:53,627 INFO [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,40473,1732513432947/8ef925b832e3%2C40473%2C1732513432947.1732513433593 2024-11-25T05:43:53,628 DEBUG [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43851:43851),(127.0.0.1/127.0.0.1:35033:35033),(127.0.0.1/127.0.0.1:36357:36357)] 2024-11-25T05:43:53,632 INFO [RS:0;8ef925b832e3:36683 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,36683,1732513432896/8ef925b832e3%2C36683%2C1732513432896.1732513433596 2024-11-25T05:43:53,641 DEBUG [RS:0;8ef925b832e3:36683 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35033:35033),(127.0.0.1/127.0.0.1:43851:43851),(127.0.0.1/127.0.0.1:36357:36357)] 2024-11-25T05:43:53,789 DEBUG [8ef925b832e3:37847 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-25T05:43:53,789 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(204): Hosts are {8ef925b832e3=0} racks are {/default-rack=0} 2024-11-25T05:43:53,792 DEBUG [8ef925b832e3:37847 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-25T05:43:53,792 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-25T05:43:53,793 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-25T05:43:53,793 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-25T05:43:53,793 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-25T05:43:53,793 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-25T05:43:53,793 INFO [8ef925b832e3:37847 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-25T05:43:53,793 INFO [8ef925b832e3:37847 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-25T05:43:53,793 INFO [8ef925b832e3:37847 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-25T05:43:53,793 DEBUG [8ef925b832e3:37847 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-25T05:43:53,793 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8ef925b832e3,40473,1732513432947 2024-11-25T05:43:53,795 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8ef925b832e3,40473,1732513432947, state=OPENING 2024-11-25T05:43:53,796 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T05:43:53,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,797 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:53,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:53,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:53,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:53,798 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:53,799 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-11-25T05:43:53,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8ef925b832e3,40473,1732513432947}] 2024-11-25T05:43:53,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T05:43:53,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T05:43:53,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T05:43:53,954 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T05:43:53,956 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53723, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T05:43:53,962 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T05:43:53,963 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T05:43:53,966 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8ef925b832e3%2C40473%2C1732513432947.meta, suffix=.meta, logDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,40473,1732513432947, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs, maxLogs=32 2024-11-25T05:43:53,967 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8ef925b832e3%2C40473%2C1732513432947.meta.1732513433966.meta 2024-11-25T05:43:53,976 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/WALs/8ef925b832e3,40473,1732513432947/8ef925b832e3%2C40473%2C1732513432947.meta.1732513433966.meta 2024-11-25T05:43:53,976 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43851:43851),(127.0.0.1/127.0.0.1:36357:36357),(127.0.0.1/127.0.0.1:35033:35033)] 2024-11-25T05:43:53,977 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T05:43:53,978 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T05:43:53,978 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T05:43:53,978 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-25T05:43:53,978 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T05:43:53,978 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:53,978 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T05:43:53,979 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T05:43:53,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T05:43:53,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T05:43:53,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T05:43:53,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T05:43:53,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T05:43:53,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T05:43:53,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T05:43:53,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T05:43:53,992 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:53,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T05:43:53,993 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T05:43:53,994 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740 2024-11-25T05:43:53,996 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740 2024-11-25T05:43:53,998 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T05:43:53,998 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T05:43:53,999 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T05:43:54,001 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T05:43:54,003 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63608931, jitterRate=-0.05215306580066681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T05:43:54,003 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T05:43:54,005 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732513433979Writing region info on filesystem at 1732513433979Initializing all the Stores at 1732513433980 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433980Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433983 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513433983Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732513433983Cleaning up temporary data from old regions at 1732513433998 (+15 ms)Running coprocessor post-open hooks at 1732513434003 (+5 ms)Region opened successfully at 1732513434004 (+1 ms) 2024-11-25T05:43:54,007 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732513433953 2024-11-25T05:43:54,010 DEBUG [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T05:43:54,011 INFO [RS_OPEN_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T05:43:54,012 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8ef925b832e3,40473,1732513432947 2024-11-25T05:43:54,013 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8ef925b832e3,40473,1732513432947, state=OPEN 2024-11-25T05:43:54,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:54,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:54,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T05:43:54,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:54,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:54,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:54,015 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/meta-region-server 2024-11-25T05:43:54,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T05:43:54,015 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8ef925b832e3,40473,1732513432947 2024-11-25T05:43:54,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T05:43:54,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8ef925b832e3,40473,1732513432947 in 216 msec 2024-11-25T05:43:54,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T05:43:54,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 636 msec 2024-11-25T05:43:54,027 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T05:43:54,027 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T05:43:54,028 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T05:43:54,028 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8ef925b832e3,40473,1732513432947, seqNum=-1] 2024-11-25T05:43:54,029 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T05:43:54,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39051, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T05:43:54,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 757 msec 2024-11-25T05:43:54,045 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732513434045, completionTime=-1 2024-11-25T05:43:54,045 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-25T05:43:54,045 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
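At this point the master has published the meta location to `/hbase/meta-region-server` and the client fetch above resolves it to 8ef925b832e3,40473. A minimal client-side sketch that resolves the same location through the public API, assuming an hbase-site.xml pointing at a reachable cluster:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateMeta {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // expects hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            HRegionLocation meta = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            // Should print the server hbase:meta was assigned to (port 40473 in this run).
            System.out.println("hbase:meta is on " + meta.getServerName());
        }
    }
}
```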
2024-11-25T05:43:54,047 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-25T05:43:54,048 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732513494048 2024-11-25T05:43:54,048 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732513554048 2024-11-25T05:43:54,048 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T05:43:54,048 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-25T05:43:54,049 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37847,1732513432828-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,049 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37847,1732513432828-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,049 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37847,1732513432828-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,049 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8ef925b832e3:37847, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,049 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,049 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,052 DEBUG [master/8ef925b832e3:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T05:43:54,055 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.992sec 2024-11-25T05:43:54,055 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T05:43:54,055 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T05:43:54,055 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T05:43:54,055 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
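The master now reports "Master has completed initialization" and schedules its periodic chores. A small sketch of confirming the same state from a client via `Admin#getClusterMetrics()` (part of the 2.x+ public API as far as I know):

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusCheck {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            ClusterMetrics metrics = admin.getClusterMetrics();
            // For this run the log shows an active master on port 37847 and three live region servers.
            System.out.println("active master: " + metrics.getMasterName());
            System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
        }
    }
}
```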
2024-11-25T05:43:54,056 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T05:43:54,056 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37847,1732513432828-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T05:43:54,056 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37847,1732513432828-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T05:43:54,059 DEBUG [master/8ef925b832e3:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T05:43:54,059 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T05:43:54,059 INFO [master/8ef925b832e3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8ef925b832e3,37847,1732513432828-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T05:43:54,147 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65c8695d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T05:43:54,148 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8ef925b832e3,37847,-1 for getting cluster id 2024-11-25T05:43:54,148 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T05:43:54,150 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cc9a464c-1759-40e8-8987-db6d6de54ba7' 2024-11-25T05:43:54,151 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T05:43:54,152 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cc9a464c-1759-40e8-8987-db6d6de54ba7" 2024-11-25T05:43:54,152 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21456cab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T05:43:54,152 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8ef925b832e3,37847,-1] 2024-11-25T05:43:54,153 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T05:43:54,154 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:54,155 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44004, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T05:43:54,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54599437, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T05:43:54,157 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T05:43:54,158 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8ef925b832e3,40473,1732513432947, seqNum=-1] 2024-11-25T05:43:54,159 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T05:43:54,161 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T05:43:54,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8ef925b832e3,37847,1732513432828 2024-11-25T05:43:54,164 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T05:43:54,166 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 8ef925b832e3,37847,1732513432828 2024-11-25T05:43:54,166 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1080fefb 2024-11-25T05:43:54,166 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T05:43:54,168 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44010, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T05:43:54,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T05:43:54,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-25T05:43:54,173 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T05:43:54,174 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:54,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-25T05:43:54,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:54,176 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T05:43:54,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741837_1013 (size=392) 2024-11-25T05:43:54,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741837_1013 (size=392) 2024-11-25T05:43:54,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741837_1013 (size=392) 2024-11-25T05:43:54,203 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e54b4f555e107d70d209ebac92a580bf, NAME => 'TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe 2024-11-25T05:43:54,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741838_1014 (size=51) 2024-11-25T05:43:54,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741838_1014 (size=51) 2024-11-25T05:43:54,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741838_1014 (size=51) 2024-11-25T05:43:54,216 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:54,216 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing e54b4f555e107d70d209ebac92a580bf, disabling compactions & flushes 2024-11-25T05:43:54,216 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:54,216 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:54,216 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. after waiting 0 ms 2024-11-25T05:43:54,216 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:54,216 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
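The entries above show the master running CreateTableProcedure (pid=4) for TestHBaseWalOnEC with a single 'cf' family and REGION_REPLICATION => '1'. A hedged sketch of the client call that produces such a procedure; the descriptor values mirror the ones printed in the log, everything else is left at defaults:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("TestHBaseWalOnEC");
            admin.createTable(TableDescriptorBuilder.newBuilder(name)
                .setRegionReplication(1)                                  // REGION_REPLICATION => '1'
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // single 'cf' family, defaults otherwise
                .build());
        }
    }
}
```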
2024-11-25T05:43:54,216 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for e54b4f555e107d70d209ebac92a580bf: Waiting for close lock at 1732513434216Disabling compacts and flushes for region at 1732513434216Disabling writes for close at 1732513434216Writing region close event to WAL at 1732513434216Closed at 1732513434216 2024-11-25T05:43:54,219 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T05:43:54,219 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1732513434219"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732513434219"}]},"ts":"1732513434219"} 2024-11-25T05:43:54,223 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-25T05:43:54,225 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T05:43:54,226 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732513434225"}]},"ts":"1732513434225"} 2024-11-25T05:43:54,229 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-25T05:43:54,230 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {8ef925b832e3=0} racks are {/default-rack=0} 2024-11-25T05:43:54,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-25T05:43:54,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-25T05:43:54,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-25T05:43:54,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-25T05:43:54,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-25T05:43:54,231 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-25T05:43:54,232 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-25T05:43:54,232 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-25T05:43:54,232 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-25T05:43:54,232 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-25T05:43:54,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e54b4f555e107d70d209ebac92a580bf, ASSIGN}] 2024-11-25T05:43:54,235 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e54b4f555e107d70d209ebac92a580bf, ASSIGN 2024-11-25T05:43:54,236 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e54b4f555e107d70d209ebac92a580bf, ASSIGN; state=OFFLINE, location=8ef925b832e3,36683,1732513432896; forceNewPlan=false, retain=false 2024-11-25T05:43:54,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:54,359 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T05:43:54,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T05:43:54,387 INFO [8ef925b832e3:37847 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-25T05:43:54,387 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e54b4f555e107d70d209ebac92a580bf, regionState=OPENING, regionLocation=8ef925b832e3,36683,1732513432896 2024-11-25T05:43:54,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e54b4f555e107d70d209ebac92a580bf, ASSIGN because future has completed 2024-11-25T05:43:54,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e54b4f555e107d70d209ebac92a580bf, server=8ef925b832e3,36683,1732513432896}] 2024-11-25T05:43:54,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T05:43:54,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T05:43:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:54,546 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T05:43:54,548 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55829, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T05:43:54,554 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
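The master has queued TransitRegionStateProcedure ... ASSIGN for region e54b4f555e107d70d209ebac92a580bf with 8ef925b832e3,36683 as the target. Once the open completes, the placement can be read back through the client; a short sketch using `Admin#getRegions(TableName)` (available in the 2.x+ API, to the best of my knowledge):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ShowRegions {
    public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // For this run there is a single region; its encoded name should match
            // e54b4f555e107d70d209ebac92a580bf from the procedure above.
            for (RegionInfo region : admin.getRegions(name)) {
                System.out.println(region.getEncodedName() + " " + region.getRegionNameAsString());
            }
        }
    }
}
```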
2024-11-25T05:43:54,555 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e54b4f555e107d70d209ebac92a580bf, NAME => 'TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf.', STARTKEY => '', ENDKEY => ''} 2024-11-25T05:43:54,555 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,555 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T05:43:54,555 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,556 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,558 INFO [StoreOpener-e54b4f555e107d70d209ebac92a580bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,560 INFO [StoreOpener-e54b4f555e107d70d209ebac92a580bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e54b4f555e107d70d209ebac92a580bf columnFamilyName cf 2024-11-25T05:43:54,560 DEBUG [StoreOpener-e54b4f555e107d70d209ebac92a580bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T05:43:54,560 INFO [StoreOpener-e54b4f555e107d70d209ebac92a580bf-1 {}] regionserver.HStore(327): Store=e54b4f555e107d70d209ebac92a580bf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T05:43:54,561 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,562 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,562 DEBUG 
[RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,563 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,563 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,565 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,568 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T05:43:54,569 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e54b4f555e107d70d209ebac92a580bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71176904, jitterRate=0.06061851978302002}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T05:43:54,569 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:54,571 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e54b4f555e107d70d209ebac92a580bf: Running coprocessor pre-open hook at 1732513434556Writing region info on filesystem at 1732513434556Initializing all the Stores at 1732513434557 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732513434557Cleaning up temporary data from old regions at 1732513434563 (+6 ms)Running coprocessor post-open hooks at 1732513434570 (+7 ms)Region opened successfully at 1732513434570 2024-11-25T05:43:54,572 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf., pid=6, masterSystemTime=1732513434546 2024-11-25T05:43:54,576 DEBUG [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:54,576 INFO [RS_OPEN_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
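During the open, the region server reports "Found 0 recovered edits file(s)" for the new region and writes recovered.edits/1.seqid. A small sketch of inspecting that directory with the Hadoop FileSystem API; the HDFS path below is the ephemeral test directory from this particular run, so treat it purely as an example:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRecoveredEdits {
    public static void main(String[] args) throws Exception {
        // NameNode and region dir as reported in this log; both are throwaway test artifacts.
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:39387"), new Configuration());
        Path regionDir = new Path("/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/"
            + "data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf");
        Path recoveredEdits = new Path(regionDir, "recovered.edits");
        if (fs.exists(recoveredEdits)) {
            for (FileStatus status : fs.listStatus(recoveredEdits)) {
                // Expect only the 1.seqid marker right after the region is first opened.
                System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
            }
        } else {
            System.out.println("no recovered.edits directory for this region");
        }
    }
}
```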
2024-11-25T05:43:54,577 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e54b4f555e107d70d209ebac92a580bf, regionState=OPEN, openSeqNum=2, regionLocation=8ef925b832e3,36683,1732513432896 2024-11-25T05:43:54,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e54b4f555e107d70d209ebac92a580bf, server=8ef925b832e3,36683,1732513432896 because future has completed 2024-11-25T05:43:54,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T05:43:54,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e54b4f555e107d70d209ebac92a580bf, server=8ef925b832e3,36683,1732513432896 in 191 msec 2024-11-25T05:43:54,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T05:43:54,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=e54b4f555e107d70d209ebac92a580bf, ASSIGN in 356 msec 2024-11-25T05:43:54,595 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T05:43:54,595 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732513434595"}]},"ts":"1732513434595"} 2024-11-25T05:43:54,599 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-25T05:43:54,601 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T05:43:54,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 431 msec 2024-11-25T05:43:54,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T05:43:54,805 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-25T05:43:54,805 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-25T05:43:54,805 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T05:43:54,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-25T05:43:54,809 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T05:43:54,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-11-25T05:43:54,814 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf., hostname=8ef925b832e3,36683,1732513432896, seqNum=2] 2024-11-25T05:43:54,815 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T05:43:54,817 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T05:43:54,821 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-25T05:43:54,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-25T05:43:54,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:54,824 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-25T05:43:54,826 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T05:43:54,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T05:43:54,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:54,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-25T05:43:54,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
2024-11-25T05:43:54,982 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing e54b4f555e107d70d209ebac92a580bf 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-25T05:43:55,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/.tmp/cf/a937120201684e41b80d7050710adb50 is 36, key is row/cf:cq/1732513434818/Put/seqid=0 2024-11-25T05:43:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741839_1015 (size=4787) 2024-11-25T05:43:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741839_1015 (size=4787) 2024-11-25T05:43:55,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741839_1015 (size=4787) 2024-11-25T05:43:55,012 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/.tmp/cf/a937120201684e41b80d7050710adb50 2024-11-25T05:43:55,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/.tmp/cf/a937120201684e41b80d7050710adb50 as hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/cf/a937120201684e41b80d7050710adb50 2024-11-25T05:43:55,031 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/cf/a937120201684e41b80d7050710adb50, entries=1, sequenceid=5, filesize=4.7 K 2024-11-25T05:43:55,033 INFO [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for e54b4f555e107d70d209ebac92a580bf in 51ms, sequenceid=5, compaction requested=false 2024-11-25T05:43:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for e54b4f555e107d70d209ebac92a580bf: 2024-11-25T05:43:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
2024-11-25T05:43:55,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8ef925b832e3:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-25T05:43:55,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-25T05:43:55,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T05:43:55,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-25T05:43:55,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 220 msec 2024-11-25T05:43:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37847 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T05:43:55,145 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-25T05:43:55,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T05:43:55,151 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T05:43:55,151 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:55,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,152 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,152 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T05:43:55,152 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T05:43:55,152 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=783864405, stopped=false 2024-11-25T05:43:55,152 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8ef925b832e3,37847,1732513432828 2024-11-25T05:43:55,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:55,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:55,153 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:55,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:55,154 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:55,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, 
quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T05:43:55,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:55,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:55,154 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T05:43:55,154 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T05:43:55,154 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:55,155 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:55,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:55,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:55,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,155 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8ef925b832e3,36683,1732513432896' ***** 2024-11-25T05:43:55,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T05:43:55,155 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T05:43:55,155 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8ef925b832e3,40473,1732513432947' ***** 2024-11-25T05:43:55,155 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T05:43:55,155 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8ef925b832e3,41569,1732513433019' ***** 2024-11-25T05:43:55,155 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T05:43:55,155 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T05:43:55,156 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T05:43:55,156 INFO [RS:0;8ef925b832e3:36683 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T05:43:55,156 INFO [RS:0;8ef925b832e3:36683 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-25T05:43:55,156 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T05:43:55,156 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T05:43:55,156 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(3091): Received CLOSE for e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:55,156 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T05:43:55,157 INFO [RS:2;8ef925b832e3:41569 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T05:43:55,157 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T05:43:55,157 INFO [RS:2;8ef925b832e3:41569 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T05:43:55,157 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(959): stopping server 8ef925b832e3,41569,1732513433019 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T05:43:55,157 INFO [RS:2;8ef925b832e3:41569 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(959): stopping server 8ef925b832e3,40473,1732513432947 2024-11-25T05:43:55,157 INFO [RS:2;8ef925b832e3:41569 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;8ef925b832e3:41569. 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;8ef925b832e3:40473. 
2024-11-25T05:43:55,157 DEBUG [RS:2;8ef925b832e3:41569 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:55,157 DEBUG [RS:2;8ef925b832e3:41569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,157 DEBUG [RS:1;8ef925b832e3:40473 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:55,157 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(976): stopping server 8ef925b832e3,41569,1732513433019; all regions closed. 2024-11-25T05:43:55,157 DEBUG [RS:1;8ef925b832e3:40473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T05:43:55,157 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-25T05:43:55,157 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(959): stopping server 8ef925b832e3,36683,1732513432896 2024-11-25T05:43:55,158 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T05:43:55,158 INFO [RS:0;8ef925b832e3:36683 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:55,158 INFO [RS:0;8ef925b832e3:36683 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8ef925b832e3:36683. 2024-11-25T05:43:55,158 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T05:43:55,158 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e54b4f555e107d70d209ebac92a580bf, disabling compactions & flushes 2024-11-25T05:43:55,158 DEBUG [RS:0;8ef925b832e3:36683 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T05:43:55,158 INFO [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:55,158 DEBUG [RS:0;8ef925b832e3:36683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,158 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:55,158 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. after waiting 0 ms 2024-11-25T05:43:55,158 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T05:43:55,158 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
2024-11-25T05:43:55,158 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1325): Online Regions={e54b4f555e107d70d209ebac92a580bf=TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf.} 2024-11-25T05:43:55,158 DEBUG [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1351): Waiting on e54b4f555e107d70d209ebac92a580bf 2024-11-25T05:43:55,161 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T05:43:55,161 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T05:43:55,161 DEBUG [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T05:43:55,161 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T05:43:55,161 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T05:43:55,161 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T05:43:55,161 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,162 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T05:43:55,162 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,162 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T05:43:55,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,162 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-25T05:43:55,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,162 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,167 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/default/TestHBaseWalOnEC/e54b4f555e107d70d209ebac92a580bf/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-25T05:43:55,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741833_1009 (size=93) 2024-11-25T05:43:55,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741833_1009 (size=93) 2024-11-25T05:43:55,168 INFO [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 
2024-11-25T05:43:55,168 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e54b4f555e107d70d209ebac92a580bf: Waiting for close lock at 1732513435158Running coprocessor pre-close hooks at 1732513435158Disabling compacts and flushes for region at 1732513435158Disabling writes for close at 1732513435158Writing region close event to WAL at 1732513435162 (+4 ms)Running coprocessor post-close hooks at 1732513435168 (+6 ms)Closed at 1732513435168 2024-11-25T05:43:55,169 DEBUG [RS_CLOSE_REGION-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf. 2024-11-25T05:43:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741833_1009 (size=93) 2024-11-25T05:43:55,173 DEBUG [RS:2;8ef925b832e3:41569 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs 2024-11-25T05:43:55,173 INFO [RS:2;8ef925b832e3:41569 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8ef925b832e3%2C41569%2C1732513433019:(num 1732513433587) 2024-11-25T05:43:55,173 DEBUG [RS:2;8ef925b832e3:41569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,173 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:55,173 INFO [RS:2;8ef925b832e3:41569 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:55,173 INFO [RS:2;8ef925b832e3:41569 {}] hbase.ChoreService(370): Chore service for: regionserver/8ef925b832e3:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:55,173 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T05:43:55,173 INFO [regionserver/8ef925b832e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T05:43:55,174 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T05:43:55,174 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T05:43:55,174 INFO [RS:2;8ef925b832e3:41569 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:55,174 INFO [RS:2;8ef925b832e3:41569 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41569 2024-11-25T05:43:55,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:55,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8ef925b832e3,41569,1732513433019 2024-11-25T05:43:55,175 INFO [RS:2;8ef925b832e3:41569 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:55,176 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8ef925b832e3,41569,1732513433019] 2024-11-25T05:43:55,177 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8ef925b832e3,41569,1732513433019 already deleted, retry=false 2024-11-25T05:43:55,177 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8ef925b832e3,41569,1732513433019 expired; onlineServers=2 2024-11-25T05:43:55,184 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/info/afa473386b7b4bfa8654dee485a96465 is 153, key is TestHBaseWalOnEC,,1732513434169.e54b4f555e107d70d209ebac92a580bf./info:regioninfo/1732513434577/Put/seqid=0 2024-11-25T05:43:55,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741840_1016 (size=6637) 2024-11-25T05:43:55,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741840_1016 (size=6637) 2024-11-25T05:43:55,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741840_1016 (size=6637) 2024-11-25T05:43:55,192 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/info/afa473386b7b4bfa8654dee485a96465 2024-11-25T05:43:55,219 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/ns/a53f28a4a985420ba7ae61fd4c092b15 is 43, key is default/ns:d/1732513434032/Put/seqid=0 2024-11-25T05:43:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741841_1017 (size=5153) 2024-11-25T05:43:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741841_1017 (size=5153) 2024-11-25T05:43:55,228 INFO [regionserver/8ef925b832e3:0.leaseChecker {}] regionserver.LeaseManager(133): 
Closed leases 2024-11-25T05:43:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741841_1017 (size=5153) 2024-11-25T05:43:55,229 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/ns/a53f28a4a985420ba7ae61fd4c092b15 2024-11-25T05:43:55,242 INFO [regionserver/8ef925b832e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:55,251 INFO [regionserver/8ef925b832e3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:55,256 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/table/da90f9fc421c4f46872951e01ee37a1b is 52, key is TestHBaseWalOnEC/table:state/1732513434595/Put/seqid=0 2024-11-25T05:43:55,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741842_1018 (size=5249) 2024-11-25T05:43:55,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741842_1018 (size=5249) 2024-11-25T05:43:55,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741842_1018 (size=5249) 2024-11-25T05:43:55,264 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/table/da90f9fc421c4f46872951e01ee37a1b 2024-11-25T05:43:55,273 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/info/afa473386b7b4bfa8654dee485a96465 as hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/info/afa473386b7b4bfa8654dee485a96465 2024-11-25T05:43:55,277 INFO [RS:2;8ef925b832e3:41569 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:55,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,277 INFO [RS:2;8ef925b832e3:41569 {}] regionserver.HRegionServer(1031): Exiting; stopping=8ef925b832e3,41569,1732513433019; zookeeper connection closed. 
2024-11-25T05:43:55,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41569-0x100756861500003, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,277 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bf7ba16 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bf7ba16 2024-11-25T05:43:55,281 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/info/afa473386b7b4bfa8654dee485a96465, entries=10, sequenceid=11, filesize=6.5 K 2024-11-25T05:43:55,283 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/ns/a53f28a4a985420ba7ae61fd4c092b15 as hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/ns/a53f28a4a985420ba7ae61fd4c092b15 2024-11-25T05:43:55,291 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/ns/a53f28a4a985420ba7ae61fd4c092b15, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T05:43:55,293 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/.tmp/table/da90f9fc421c4f46872951e01ee37a1b as hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/table/da90f9fc421c4f46872951e01ee37a1b 2024-11-25T05:43:55,302 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/table/da90f9fc421c4f46872951e01ee37a1b, entries=2, sequenceid=11, filesize=5.1 K 2024-11-25T05:43:55,303 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false 2024-11-25T05:43:55,337 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T05:43:55,338 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T05:43:55,338 INFO [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T05:43:55,338 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for 
close lock at 1732513435161Running coprocessor pre-close hooks at 1732513435161Disabling compacts and flushes for region at 1732513435161Disabling writes for close at 1732513435162 (+1 ms)Obtaining lock to block concurrent updates at 1732513435162Preparing flush snapshotting stores in 1588230740 at 1732513435162Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732513435163 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732513435163Flushing 1588230740/info: creating writer at 1732513435164 (+1 ms)Flushing 1588230740/info: appending metadata at 1732513435183 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732513435183Flushing 1588230740/ns: creating writer at 1732513435201 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732513435219 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732513435219Flushing 1588230740/table: creating writer at 1732513435237 (+18 ms)Flushing 1588230740/table: appending metadata at 1732513435255 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732513435255Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cb69a6d: reopening flushed file at 1732513435271 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@86ee4b2: reopening flushed file at 1732513435281 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b231904: reopening flushed file at 1732513435292 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 141ms, sequenceid=11, compaction requested=false at 1732513435303 (+11 ms)Writing region close event to WAL at 1732513435332 (+29 ms)Running coprocessor post-close hooks at 1732513435338 (+6 ms)Closed at 1732513435338 2024-11-25T05:43:55,339 DEBUG [RS_CLOSE_META-regionserver/8ef925b832e3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T05:43:55,358 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(976): stopping server 8ef925b832e3,36683,1732513432896; all regions closed. 2024-11-25T05:43:55,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,359 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,359 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,359 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,361 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(976): stopping server 8ef925b832e3,40473,1732513432947; all regions closed. 
2024-11-25T05:43:55,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741835_1011 (size=1298) 2024-11-25T05:43:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741835_1011 (size=1298) 2024-11-25T05:43:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741835_1011 (size=1298) 2024-11-25T05:43:55,362 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,362 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,363 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,363 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,363 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,366 DEBUG [RS:0;8ef925b832e3:36683 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs 2024-11-25T05:43:55,366 INFO [RS:0;8ef925b832e3:36683 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8ef925b832e3%2C36683%2C1732513432896:(num 1732513433596) 2024-11-25T05:43:55,366 DEBUG [RS:0;8ef925b832e3:36683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,366 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T05:43:55,366 INFO [RS:0;8ef925b832e3:36683 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:55,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741836_1012 (size=2751) 2024-11-25T05:43:55,366 INFO [RS:0;8ef925b832e3:36683 {}] hbase.ChoreService(370): Chore service for: regionserver/8ef925b832e3:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:55,367 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T05:43:55,367 INFO [regionserver/8ef925b832e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T05:43:55,367 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T05:43:55,367 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T05:43:55,367 INFO [RS:0;8ef925b832e3:36683 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:55,367 INFO [RS:0;8ef925b832e3:36683 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36683 2024-11-25T05:43:55,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741836_1012 (size=2751) 2024-11-25T05:43:55,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741836_1012 (size=2751) 2024-11-25T05:43:55,368 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8ef925b832e3,36683,1732513432896 2024-11-25T05:43:55,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:55,369 INFO [RS:0;8ef925b832e3:36683 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:55,370 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8ef925b832e3,36683,1732513432896] 2024-11-25T05:43:55,371 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8ef925b832e3,36683,1732513432896 already deleted, retry=false 2024-11-25T05:43:55,371 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8ef925b832e3,36683,1732513432896 expired; onlineServers=1 2024-11-25T05:43:55,372 DEBUG [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs 2024-11-25T05:43:55,372 INFO [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8ef925b832e3%2C40473%2C1732513432947.meta:.meta(num 1732513433966) 2024-11-25T05:43:55,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,373 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,373 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,373 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,373 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741834_1010 (size=93) 2024-11-25T05:43:55,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741834_1010 (size=93) 2024-11-25T05:43:55,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741834_1010 (size=93) 2024-11-25T05:43:55,379 DEBUG [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/oldWALs 2024-11-25T05:43:55,379 INFO [RS:1;8ef925b832e3:40473 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8ef925b832e3%2C40473%2C1732513432947:(num 1732513433593) 2024-11-25T05:43:55,379 DEBUG [RS:1;8ef925b832e3:40473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T05:43:55,379 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.LeaseManager(133): Closed 
leases 2024-11-25T05:43:55,379 INFO [RS:1;8ef925b832e3:40473 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:55,379 INFO [RS:1;8ef925b832e3:40473 {}] hbase.ChoreService(370): Chore service for: regionserver/8ef925b832e3:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:55,379 INFO [RS:1;8ef925b832e3:40473 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:55,379 INFO [regionserver/8ef925b832e3:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T05:43:55,379 INFO [RS:1;8ef925b832e3:40473 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40473 2024-11-25T05:43:55,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8ef925b832e3,40473,1732513432947 2024-11-25T05:43:55,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T05:43:55,381 INFO [RS:1;8ef925b832e3:40473 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:55,381 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8ef925b832e3,40473,1732513432947] 2024-11-25T05:43:55,382 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8ef925b832e3,40473,1732513432947 already deleted, retry=false 2024-11-25T05:43:55,382 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8ef925b832e3,40473,1732513432947 expired; onlineServers=0 2024-11-25T05:43:55,382 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8ef925b832e3,37847,1732513432828' ***** 2024-11-25T05:43:55,382 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T05:43:55,382 INFO [M:0;8ef925b832e3:37847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T05:43:55,383 INFO [M:0;8ef925b832e3:37847 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T05:43:55,383 DEBUG [M:0;8ef925b832e3:37847 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T05:43:55,383 DEBUG [M:0;8ef925b832e3:37847 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T05:43:55,383 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T05:43:55,383 DEBUG [master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.large.0-1732513433303 {}] cleaner.HFileCleaner(306): Exit Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.large.0-1732513433303,5,FailOnTimeoutGroup] 2024-11-25T05:43:55,383 DEBUG [master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.small.0-1732513433304 {}] cleaner.HFileCleaner(306): Exit Thread[master/8ef925b832e3:0:becomeActiveMaster-HFileCleaner.small.0-1732513433304,5,FailOnTimeoutGroup] 2024-11-25T05:43:55,383 INFO [M:0;8ef925b832e3:37847 {}] hbase.ChoreService(370): Chore service for: master/8ef925b832e3:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T05:43:55,383 INFO [M:0;8ef925b832e3:37847 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T05:43:55,383 DEBUG [M:0;8ef925b832e3:37847 {}] master.HMaster(1795): Stopping service threads 2024-11-25T05:43:55,383 INFO [M:0;8ef925b832e3:37847 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T05:43:55,383 INFO [M:0;8ef925b832e3:37847 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T05:43:55,383 INFO [M:0;8ef925b832e3:37847 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T05:43:55,384 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T05:43:55,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T05:43:55,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T05:43:55,385 DEBUG [M:0;8ef925b832e3:37847 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-25T05:43:55,385 DEBUG [M:0;8ef925b832e3:37847 {}] master.ActiveMasterManager(353): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-25T05:43:55,386 INFO [M:0;8ef925b832e3:37847 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/.lastflushedseqids 2024-11-25T05:43:55,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741843_1019 (size=127) 2024-11-25T05:43:55,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741843_1019 (size=127) 2024-11-25T05:43:55,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741843_1019 (size=127) 2024-11-25T05:43:55,396 INFO [M:0;8ef925b832e3:37847 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T05:43:55,396 INFO [M:0;8ef925b832e3:37847 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T05:43:55,396 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T05:43:55,397 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:55,397 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:55,397 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T05:43:55,397 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:55,397 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-11-25T05:43:55,417 DEBUG [M:0;8ef925b832e3:37847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ab62a7bbfb94773b4ffa4acc3e8afe6 is 82, key is hbase:meta,,1/info:regioninfo/1732513434011/Put/seqid=0 2024-11-25T05:43:55,418 WARN [IPC Server handler 4 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T05:43:55,419 WARN [IPC Server handler 4 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T05:43:55,419 WARN [IPC Server handler 4 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T05:43:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741844_1020 (size=5672) 2024-11-25T05:43:55,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741844_1020 (size=5672) 2024-11-25T05:43:55,427 INFO [M:0;8ef925b832e3:37847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ab62a7bbfb94773b4ffa4acc3e8afe6 2024-11-25T05:43:55,455 DEBUG [M:0;8ef925b832e3:37847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af7662e0efa74fb7b931bfcd44c629ee is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732513434603/Put/seqid=0 2024-11-25T05:43:55,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741845_1021 (size=6441) 2024-11-25T05:43:55,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741845_1021 (size=6441) 2024-11-25T05:43:55,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741845_1021 (size=6441) 2024-11-25T05:43:55,463 INFO [M:0;8ef925b832e3:37847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af7662e0efa74fb7b931bfcd44c629ee 2024-11-25T05:43:55,470 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,470 DEBUG [pool-326-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36683-0x100756861500001, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,470 INFO [RS:0;8ef925b832e3:36683 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:55,470 INFO [RS:0;8ef925b832e3:36683 {}] regionserver.HRegionServer(1031): Exiting; stopping=8ef925b832e3,36683,1732513432896; zookeeper connection closed. 2024-11-25T05:43:55,471 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3f8ab491 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3f8ab491 2024-11-25T05:43:55,482 INFO [RS:1;8ef925b832e3:40473 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:55,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,482 INFO [RS:1;8ef925b832e3:40473 {}] regionserver.HRegionServer(1031): Exiting; stopping=8ef925b832e3,40473,1732513432947; zookeeper connection closed. 
2024-11-25T05:43:55,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40473-0x100756861500002, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,483 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@84f6f4e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@84f6f4e 2024-11-25T05:43:55,483 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-25T05:43:55,488 DEBUG [M:0;8ef925b832e3:37847 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/50ba9b95783d474182613d55c6721f50 is 69, key is 8ef925b832e3,36683,1732513432896/rs:state/1732513433389/Put/seqid=0 2024-11-25T05:43:55,489 WARN [IPC Server handler 3 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T05:43:55,489 WARN [IPC Server handler 3 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T05:43:55,489 WARN [IPC Server handler 3 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T05:43:55,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741846_1022 (size=5294) 2024-11-25T05:43:55,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741846_1022 (size=5294) 2024-11-25T05:43:55,496 INFO [M:0;8ef925b832e3:37847 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/50ba9b95783d474182613d55c6721f50 2024-11-25T05:43:55,503 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ab62a7bbfb94773b4ffa4acc3e8afe6 as 
hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0ab62a7bbfb94773b4ffa4acc3e8afe6 2024-11-25T05:43:55,510 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0ab62a7bbfb94773b4ffa4acc3e8afe6, entries=8, sequenceid=72, filesize=5.5 K 2024-11-25T05:43:55,512 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af7662e0efa74fb7b931bfcd44c629ee as hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/af7662e0efa74fb7b931bfcd44c629ee 2024-11-25T05:43:55,518 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/af7662e0efa74fb7b931bfcd44c629ee, entries=8, sequenceid=72, filesize=6.3 K 2024-11-25T05:43:55,519 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/50ba9b95783d474182613d55c6721f50 as hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/50ba9b95783d474182613d55c6721f50 2024-11-25T05:43:55,526 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/b940392a-c00b-6145-cfe1-627b9b9c58fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/50ba9b95783d474182613d55c6721f50, entries=3, sequenceid=72, filesize=5.2 K 2024-11-25T05:43:55,528 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=72, compaction requested=false 2024-11-25T05:43:55,529 INFO [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T05:43:55,530 DEBUG [M:0;8ef925b832e3:37847 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732513435396Disabling compacts and flushes for region at 1732513435396Disabling writes for close at 1732513435397 (+1 ms)Obtaining lock to block concurrent updates at 1732513435397Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732513435397Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1732513435397Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732513435398 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732513435399 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732513435416 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732513435416Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732513435434 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732513435455 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732513435455Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732513435470 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732513435487 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732513435487Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e6bdfab: reopening flushed file at 1732513435502 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bf1dbad: reopening flushed file at 1732513435511 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@728fe801: reopening flushed file at 1732513435518 (+7 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=72, compaction requested=false at 1732513435528 (+10 ms)Writing region close event to WAL at 1732513435529 (+1 ms)Closed at 1732513435529 2024-11-25T05:43:55,530 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,530 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,530 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,530 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,530 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T05:43:55,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35497 is added to blk_1073741830_1006 (size=32695) 2024-11-25T05:43:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39287 is added to blk_1073741830_1006 (size=32695) 2024-11-25T05:43:55,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741830_1006 (size=32695) 2024-11-25T05:43:55,534 INFO [M:0;8ef925b832e3:37847 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T05:43:55,534 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T05:43:55,535 INFO [M:0;8ef925b832e3:37847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37847 2024-11-25T05:43:55,535 INFO [M:0;8ef925b832e3:37847 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T05:43:55,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,636 INFO [M:0;8ef925b832e3:37847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T05:43:55,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37847-0x100756861500000, quorum=127.0.0.1:57148, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T05:43:55,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12ef114e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:55,639 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2123257e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:55,639 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:55,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2590be83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:55,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f0b4cbc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:55,641 WARN [BP-955224312-172.17.0.2-1732513431917 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T05:43:55,641 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T05:43:55,641 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T05:43:55,641 WARN [BP-955224312-172.17.0.2-1732513431917 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-955224312-172.17.0.2-1732513431917 (Datanode Uuid 29cf785c-8621-456f-b81b-ae9ff45b7baf) service to localhost/127.0.0.1:39387 2024-11-25T05:43:55,641 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data5/current/BP-955224312-172.17.0.2-1732513431917 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:55,642 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data6/current/BP-955224312-172.17.0.2-1732513431917 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:55,642 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T05:43:55,644 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@143db790{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:55,644 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f2822e1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:55,644 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:55,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f5ab69f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:55,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3186c14b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:55,646 WARN [BP-955224312-172.17.0.2-1732513431917 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T05:43:55,646 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T05:43:55,647 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T05:43:55,647 WARN [BP-955224312-172.17.0.2-1732513431917 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-955224312-172.17.0.2-1732513431917 (Datanode Uuid 1c33aa96-25f5-42bf-a766-4cb708c1ee1d) service to localhost/127.0.0.1:39387 2024-11-25T05:43:55,647 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data3/current/BP-955224312-172.17.0.2-1732513431917 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:55,648 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data4/current/BP-955224312-172.17.0.2-1732513431917 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:55,648 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T05:43:55,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@538c111d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T05:43:55,658 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@431cf7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:55,658 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:55,658 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@195bb277{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:55,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a678bb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:55,660 WARN [BP-955224312-172.17.0.2-1732513431917 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T05:43:55,660 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T05:43:55,660 WARN [BP-955224312-172.17.0.2-1732513431917 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-955224312-172.17.0.2-1732513431917 (Datanode Uuid 64ffed0a-e056-4116-9c84-c4f717469ba6) service to localhost/127.0.0.1:39387 2024-11-25T05:43:55,660 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T05:43:55,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data1/current/BP-955224312-172.17.0.2-1732513431917 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:55,661 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/cluster_93af226e-d16b-f6be-683c-610e5dd0b63e/data/data2/current/BP-955224312-172.17.0.2-1732513431917 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T05:43:55,661 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T05:43:55,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53bceb71{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T05:43:55,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e67c333{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T05:43:55,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T05:43:55,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14e12329{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T05:43:55,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47444538{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6e2c4c56-a65d-fc7e-e4d1-4ec5b9d31ace/hadoop.log.dir/,STOPPED} 2024-11-25T05:43:55,677 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T05:43:55,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T05:43:55,712 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 85) - Thread LEAK? -, OpenFileDescriptor=518 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=390 (was 398), ProcessCount=11 (was 11), AvailableMemoryMB=7528 (was 7772)