2024-12-07 00:53:23,191 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 00:53:23,202 main DEBUG Took 0.009422 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 00:53:23,203 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 00:53:23,203 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 00:53:23,204 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 00:53:23,205 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,214 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 00:53:23,232 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,234 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,235 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,235 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,236 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,236 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,237 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,238 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,238 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,239 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,240 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,240 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,241 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,241 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 00:53:23,242 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,242 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,243 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,243 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,243 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,244 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,245 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,245 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,246 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:53:23,246 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,246 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 00:53:23,248 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:53:23,250 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 00:53:23,251 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 00:53:23,252 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 00:53:23,253 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 00:53:23,254 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 00:53:23,263 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 00:53:23,265 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 00:53:23,267 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 00:53:23,267 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 00:53:23,268 main DEBUG createAppenders(={Console}) 2024-12-07 00:53:23,268 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-07 00:53:23,269 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-07 00:53:23,269 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-07 00:53:23,269 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 00:53:23,270 main DEBUG OutputStream closed 2024-12-07 00:53:23,270 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 00:53:23,270 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 00:53:23,270 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-07 00:53:23,332 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 00:53:23,334 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 00:53:23,335 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 00:53:23,336 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 00:53:23,337 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 00:53:23,337 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 00:53:23,337 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 00:53:23,338 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 00:53:23,338 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 00:53:23,339 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 00:53:23,339 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 00:53:23,339 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 00:53:23,339 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 00:53:23,340 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 00:53:23,340 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 00:53:23,340 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 00:53:23,341 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 00:53:23,341 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 00:53:23,343 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 00:53:23,343 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-07 00:53:23,344 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 00:53:23,344 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-07T00:53:23,359 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-07 00:53:23,361 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 00:53:23,362 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-07T00:53:23,565 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39 2024-12-07T00:53:23,593 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9, deleteOnExit=true 2024-12-07T00:53:23,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/test.cache.data in system properties and HBase conf 2024-12-07T00:53:23,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T00:53:23,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir in system properties and HBase conf 2024-12-07T00:53:23,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T00:53:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T00:53:23,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T00:53:23,686 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T00:53:23,768 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T00:53:23,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:53:23,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:53:23,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T00:53:23,772 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:53:23,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T00:53:23,773 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T00:53:23,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:53:23,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:53:23,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T00:53:23,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/nfs.dump.dir in system properties and HBase conf 2024-12-07T00:53:23,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/java.io.tmpdir in system properties and HBase conf 2024-12-07T00:53:23,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:53:23,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T00:53:23,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T00:53:24,786 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T00:53:24,849 INFO [Time-limited test {}] log.Log(170): Logging initialized @2234ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T00:53:24,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:24,969 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:24,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:24,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:24,990 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:53:25,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:25,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:25,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:25,163 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/java.io.tmpdir/jetty-localhost-33179-hadoop-hdfs-3_4_1-tests_jar-_-any-7698149114015520931/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T00:53:25,171 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:33179} 2024-12-07T00:53:25,171 INFO [Time-limited test {}] server.Server(415): Started @2556ms 2024-12-07T00:53:25,702 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:25,708 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:25,709 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:25,709 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:25,709 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:53:25,710 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:25,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:25,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/java.io.tmpdir/jetty-localhost-38757-hadoop-hdfs-3_4_1-tests_jar-_-any-16487763830974497533/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:25,803 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:38757} 2024-12-07T00:53:25,803 INFO [Time-limited test {}] server.Server(415): Started @3188ms 2024-12-07T00:53:25,846 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:53:25,938 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:25,942 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:25,944 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:25,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:25,945 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:53:25,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:25,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:26,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/java.io.tmpdir/jetty-localhost-39927-hadoop-hdfs-3_4_1-tests_jar-_-any-14379232339854211450/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:26,045 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:39927} 2024-12-07T00:53:26,046 INFO [Time-limited test {}] server.Server(415): Started @3431ms 2024-12-07T00:53:26,048 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:53:26,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:26,085 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:26,089 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:26,089 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:26,089 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:53:26,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:26,090 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:26,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/java.io.tmpdir/jetty-localhost-42957-hadoop-hdfs-3_4_1-tests_jar-_-any-12709090532953455098/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:26,184 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:42957} 2024-12-07T00:53:26,184 INFO [Time-limited test {}] server.Server(415): Started @3569ms 2024-12-07T00:53:26,186 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-07T00:53:27,786 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data3/current/BP-50809909-172.17.0.2-1733532804262/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:27,786 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data1/current/BP-50809909-172.17.0.2-1733532804262/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:27,786 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data2/current/BP-50809909-172.17.0.2-1733532804262/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:27,786 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data4/current/BP-50809909-172.17.0.2-1733532804262/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:27,812 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T00:53:27,812 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T00:53:27,856 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1233d3a1792bb29b with lease ID 0x4f56c7efc0ed4b1: Processing first storage report for DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4 from datanode DatanodeRegistration(127.0.0.1:45537, datanodeUuid=fa30874e-e4de-4639-8071-c548b624a18b, infoPort=34891, infoSecurePort=0, ipcPort=41305, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262) 2024-12-07T00:53:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1233d3a1792bb29b with lease ID 0x4f56c7efc0ed4b1: from storage DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4 node DatanodeRegistration(127.0.0.1:45537, datanodeUuid=fa30874e-e4de-4639-8071-c548b624a18b, infoPort=34891, infoSecurePort=0, ipcPort=41305, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-07T00:53:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8008eb046c3ac8e5 with lease ID 0x4f56c7efc0ed4b0: Processing first storage report for DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5 from datanode DatanodeRegistration(127.0.0.1:43817, datanodeUuid=72911d0a-641e-4672-a815-3706d7302373, infoPort=42607, infoSecurePort=0, ipcPort=37971, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262) 2024-12-07T00:53:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8008eb046c3ac8e5 with lease ID 0x4f56c7efc0ed4b0: from storage DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5 node DatanodeRegistration(127.0.0.1:43817, datanodeUuid=72911d0a-641e-4672-a815-3706d7302373, infoPort=42607, infoSecurePort=0, ipcPort=37971, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T00:53:27,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1233d3a1792bb29b with lease ID 0x4f56c7efc0ed4b1: Processing first storage report for DS-3487f0ce-36f5-4f4a-8890-bb53c30320dc from datanode DatanodeRegistration(127.0.0.1:45537, datanodeUuid=fa30874e-e4de-4639-8071-c548b624a18b, infoPort=34891, infoSecurePort=0, ipcPort=41305, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262) 2024-12-07T00:53:27,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1233d3a1792bb29b with lease ID 0x4f56c7efc0ed4b1: from storage DS-3487f0ce-36f5-4f4a-8890-bb53c30320dc node DatanodeRegistration(127.0.0.1:45537, datanodeUuid=fa30874e-e4de-4639-8071-c548b624a18b, infoPort=34891, infoSecurePort=0, ipcPort=41305, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:27,859 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8008eb046c3ac8e5 with lease ID 0x4f56c7efc0ed4b0: Processing first storage report for DS-ce276e4f-e015-40c9-aa03-5a8a8443c909 from datanode DatanodeRegistration(127.0.0.1:43817, datanodeUuid=72911d0a-641e-4672-a815-3706d7302373, infoPort=42607, infoSecurePort=0, ipcPort=37971, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262) 2024-12-07T00:53:27,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8008eb046c3ac8e5 
with lease ID 0x4f56c7efc0ed4b0: from storage DS-ce276e4f-e015-40c9-aa03-5a8a8443c909 node DatanodeRegistration(127.0.0.1:43817, datanodeUuid=72911d0a-641e-4672-a815-3706d7302373, infoPort=42607, infoSecurePort=0, ipcPort=37971, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:27,881 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data5/current/BP-50809909-172.17.0.2-1733532804262/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:27,881 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data6/current/BP-50809909-172.17.0.2-1733532804262/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:27,905 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T00:53:27,910 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6b91dad3ba14f10 with lease ID 0x4f56c7efc0ed4b2: Processing first storage report for DS-f8a201ba-efe4-4c94-8283-33163ec8e43b from datanode DatanodeRegistration(127.0.0.1:38851, datanodeUuid=0ff54060-3a02-49a5-954a-34ba79a54e5a, infoPort=39827, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262) 2024-12-07T00:53:27,910 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6b91dad3ba14f10 with lease ID 0x4f56c7efc0ed4b2: from storage DS-f8a201ba-efe4-4c94-8283-33163ec8e43b node DatanodeRegistration(127.0.0.1:38851, datanodeUuid=0ff54060-3a02-49a5-954a-34ba79a54e5a, infoPort=39827, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T00:53:27,910 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6b91dad3ba14f10 with lease ID 0x4f56c7efc0ed4b2: Processing first storage report for DS-ce3abfc1-2d6e-45bc-8e77-63c5a41fbeb8 from datanode DatanodeRegistration(127.0.0.1:38851, datanodeUuid=0ff54060-3a02-49a5-954a-34ba79a54e5a, infoPort=39827, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262) 2024-12-07T00:53:27,910 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6b91dad3ba14f10 with lease ID 0x4f56c7efc0ed4b2: from storage DS-ce3abfc1-2d6e-45bc-8e77-63c5a41fbeb8 node DatanodeRegistration(127.0.0.1:38851, datanodeUuid=0ff54060-3a02-49a5-954a-34ba79a54e5a, infoPort=39827, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=699713245;c=1733532804262), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:27,996 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39 
2024-12-07T00:53:28,061 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-07T00:53:28,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=157, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=212, ProcessCount=11, AvailableMemoryMB=8050
2024-12-07T00:53:28,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-07T00:53:28,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-07T00:53:28,202 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/zookeeper_0, clientPort=60194, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-07T00:53:28,211 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60194
2024-12-07T00:53:28,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-07T00:53:28,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-07T00:53:28,293 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-07T00:53:28,294 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-07T00:53:28,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:44154 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:38851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44154 dst: /127.0.0.1:38851
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T00:53:28,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-07T00:53:28,750 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-07T00:53:28,761 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da with version=8
2024-12-07T00:53:28,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/hbase-staging
2024-12-07T00:53:28,844 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-07T00:53:29,090 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3e92e60d7d96:0 server-side Connection retries=45
2024-12-07T00:53:29,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-07T00:53:29,099 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-07T00:53:29,104 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-07T00:53:29,104 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-07T00:53:29,104 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-07T00:53:29,212 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-07T00:53:29,262 INFO
[Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T00:53:29,270 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T00:53:29,273 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:29,295 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 82439 (auto-detected) 2024-12-07T00:53:29,296 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T00:53:29,311 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35431 2024-12-07T00:53:29,329 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35431 connecting to ZooKeeper ensemble=127.0.0.1:60194 2024-12-07T00:53:29,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:354310x0, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:29,454 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35431-0x101ad476fa50000 connected 2024-12-07T00:53:29,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:29,568 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da, hbase.cluster.distributed=false 2024-12-07T00:53:29,590 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:29,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35431 2024-12-07T00:53:29,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35431 2024-12-07T00:53:29,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35431 2024-12-07T00:53:29,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35431 2024-12-07T00:53:29,598 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35431 2024-12-07T00:53:29,685 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:29,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,687 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:29,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:29,689 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:53:29,691 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:29,692 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37517 2024-12-07T00:53:29,694 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37517 connecting to ZooKeeper ensemble=127.0.0.1:60194 2024-12-07T00:53:29,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,698 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375170x0, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:29,717 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37517-0x101ad476fa50001 connected 2024-12-07T00:53:29,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:29,721 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:53:29,728 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:53:29,730 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:53:29,736 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:29,737 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37517 2024-12-07T00:53:29,737 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37517 2024-12-07T00:53:29,738 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37517 2024-12-07T00:53:29,738 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37517 2024-12-07T00:53:29,738 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37517 2024-12-07T00:53:29,752 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:29,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,753 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:29,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:29,754 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:53:29,754 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:29,755 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33661 2024-12-07T00:53:29,756 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33661 connecting to ZooKeeper ensemble=127.0.0.1:60194 2024-12-07T00:53:29,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,760 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336610x0, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:29,778 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:336610x0, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:29,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33661-0x101ad476fa50002 connected 
2024-12-07T00:53:29,779 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:53:29,780 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:53:29,781 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:53:29,783 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:29,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33661 2024-12-07T00:53:29,784 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33661 2024-12-07T00:53:29,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33661 2024-12-07T00:53:29,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33661 2024-12-07T00:53:29,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33661 2024-12-07T00:53:29,800 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:29,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,800 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,801 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:29,801 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:29,801 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:29,801 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:53:29,801 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:29,802 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37867 2024-12-07T00:53:29,803 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37867 connecting to ZooKeeper ensemble=127.0.0.1:60194 2024-12-07T00:53:29,805 INFO [Time-limited test {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:29,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378670x0, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:29,822 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:378670x0, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:29,822 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37867-0x101ad476fa50003 connected 2024-12-07T00:53:29,823 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:53:29,824 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:53:29,825 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:53:29,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:29,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37867 2024-12-07T00:53:29,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37867 2024-12-07T00:53:29,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37867 2024-12-07T00:53:29,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37867 2024-12-07T00:53:29,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37867 2024-12-07T00:53:29,843 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3e92e60d7d96:35431 2024-12-07T00:53:29,844 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:29,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,856 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:29,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:29,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:29,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:29,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:29,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:29,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:29,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:29,886 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T00:53:29,888 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3e92e60d7d96,35431,1733532808947 from backup master directory 2024-12-07T00:53:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:29,896 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:53:29,896 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:29,898 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T00:53:29,899 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T00:53:29,958 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/hbase.id] with ID: e1a10915-b28d-4097-ad25-390bccf0bb1f 2024-12-07T00:53:29,958 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/.tmp/hbase.id 2024-12-07T00:53:29,965 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:29,965 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:29,969 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:39888 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39888 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:29,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-07T00:53:29,975 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:29,976 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/.tmp/hbase.id]:[hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/hbase.id] 2024-12-07T00:53:30,018 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:30,023 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T00:53:30,041 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-12-07T00:53:30,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,066 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,066 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:44190 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:38851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44190 dst: /127.0.0.1:38851 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:30,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-07T00:53:30,077 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T00:53:30,091 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:53:30,093 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T00:53:30,097 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:53:30,123 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,123 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:39906 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39906 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:30,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-07T00:53:30,133 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:30,150 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store 2024-12-07T00:53:30,165 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,165 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,167 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:39922 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39922 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:30,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-07T00:53:30,172 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:30,176 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T00:53:30,179 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:30,180 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T00:53:30,180 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:30,180 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:30,182 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-07T00:53:30,182 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:30,182 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:30,183 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733532810180Disabling compacts and flushes for region at 1733532810180Disabling writes for close at 1733532810182 (+2 ms)Writing region close event to WAL at 1733532810182Closed at 1733532810182 2024-12-07T00:53:30,185 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/.initializing 2024-12-07T00:53:30,185 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/WALs/3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:30,192 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:53:30,206 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C35431%2C1733532808947, suffix=, logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/WALs/3e92e60d7d96,35431,1733532808947, archiveDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/oldWALs, maxLogs=10 2024-12-07T00:53:30,231 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/WALs/3e92e60d7d96,35431,1733532808947/3e92e60d7d96%2C35431%2C1733532808947.1733532810210, exclude list is [], retry=0 2024-12-07T00:53:30,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:30,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45537,DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4,DISK] 2024-12-07T00:53:30,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38851,DS-f8a201ba-efe4-4c94-8283-33163ec8e43b,DISK] 2024-12-07T00:53:30,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43817,DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5,DISK] 2024-12-07T00:53:30,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-07T00:53:30,291 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/WALs/3e92e60d7d96,35431,1733532808947/3e92e60d7d96%2C35431%2C1733532808947.1733532810210 2024-12-07T00:53:30,292 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39827:39827),(127.0.0.1/127.0.0.1:34891:34891),(127.0.0.1/127.0.0.1:42607:42607)] 2024-12-07T00:53:30,293 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:53:30,293 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:30,296 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,296 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,328 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T00:53:30,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:30,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T00:53:30,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:30,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T00:53:30,363 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:30,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,366 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T00:53:30,366 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:30,367 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,370 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,371 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,376 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,377 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,380 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T00:53:30,384 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:30,390 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:53:30,391 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67754488, jitterRate=0.0096205472946167}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:53:30,396 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733532810307Initializing all the Stores at 1733532810309 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532810309Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532810310 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532810310Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532810310Cleaning up temporary data from old regions at 1733532810377 (+67 ms)Region opened successfully at 1733532810396 (+19 ms) 2024-12-07T00:53:30,397 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T00:53:30,429 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ebd1b73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:30,455 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T00:53:30,464 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T00:53:30,464 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T00:53:30,467 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T00:53:30,468 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T00:53:30,472 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-07T00:53:30,472 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T00:53:30,495 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T00:53:30,503 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T00:53:30,547 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T00:53:30,551 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T00:53:30,554 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T00:53:30,567 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T00:53:30,569 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T00:53:30,574 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T00:53:30,588 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T00:53:30,589 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T00:53:30,600 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T00:53:30,623 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T00:53:30,631 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T00:53:30,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:30,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:30,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:30,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:30,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,647 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3e92e60d7d96,35431,1733532808947, sessionid=0x101ad476fa50000, setting cluster-up flag (Was=false) 2024-12-07T00:53:30,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-07T00:53:30,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,706 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T00:53:30,711 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:30,769 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T00:53:30,771 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:30,782 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T00:53:30,833 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(746): ClusterId : e1a10915-b28d-4097-ad25-390bccf0bb1f 2024-12-07T00:53:30,833 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(746): ClusterId : e1a10915-b28d-4097-ad25-390bccf0bb1f 2024-12-07T00:53:30,835 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(746): ClusterId : e1a10915-b28d-4097-ad25-390bccf0bb1f 2024-12-07T00:53:30,836 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:53:30,836 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:53:30,836 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:53:30,849 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:30,854 DEBUG [RS:2;3e92e60d7d96:37867 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:53:30,854 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:53:30,854 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:53:30,855 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:53:30,855 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:53:30,855 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:53:30,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-07T00:53:30,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-07T00:53:30,859 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T00:53:30,865 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:53:30,865 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:53:30,865 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:53:30,865 DEBUG [RS:2;3e92e60d7d96:37867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d5a8fc8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:30,865 DEBUG [RS:1;3e92e60d7d96:33661 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3855774c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:30,865 DEBUG [RS:0;3e92e60d7d96:37517 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cc3f043, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:30,866 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T00:53:30,872 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3e92e60d7d96,35431,1733532808947 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T00:53:30,881 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:30,881 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:30,882 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:30,882 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:30,882 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3e92e60d7d96:0, corePoolSize=10, maxPoolSize=10 2024-12-07T00:53:30,882 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:30,882 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:30,883 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:30,884 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3e92e60d7d96:37517 2024-12-07T00:53:30,884 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3e92e60d7d96:37867 2024-12-07T00:53:30,884 DEBUG [RS:1;3e92e60d7d96:33661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3e92e60d7d96:33661 2024-12-07T00:53:30,888 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T00:53:30,889 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T00:53:30,889 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T00:53:30,889 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T00:53:30,889 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T00:53:30,889 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T00:53:30,889 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T00:53:30,889 DEBUG [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T00:53:30,889 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T00:53:30,889 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:30,890 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T00:53:30,892 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e92e60d7d96,35431,1733532808947 with port=37867, startcode=1733532809799 2024-12-07T00:53:30,892 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e92e60d7d96,35431,1733532808947 with port=33661, startcode=1733532809752 2024-12-07T00:53:30,892 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e92e60d7d96,35431,1733532808947 with port=37517, startcode=1733532809656 2024-12-07T00:53:30,896 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,896 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733532840896 2024-12-07T00:53:30,896 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T00:53:30,899 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T00:53:30,900 INFO 
[master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T00:53:30,903 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T00:53:30,903 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T00:53:30,904 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T00:53:30,904 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T00:53:30,905 DEBUG [RS:2;3e92e60d7d96:37867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:53:30,905 DEBUG [RS:0;3e92e60d7d96:37517 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:53:30,905 DEBUG [RS:1;3e92e60d7d96:33661 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:53:30,908 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:30,913 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T00:53:30,914 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T00:53:30,915 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,915 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T00:53:30,915 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T00:53:30,921 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T00:53:30,921 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T00:53:30,927 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:47086 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:45537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47086 dst: /127.0.0.1:45537 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:30,932 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.large.0-1733532810922,5,FailOnTimeoutGroup] 2024-12-07T00:53:30,936 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.small.0-1733532810932,5,FailOnTimeoutGroup] 2024-12-07T00:53:30,936 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:30,937 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T00:53:30,938 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:30,938 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:30,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-07T00:53:30,946 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:30,947 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T00:53:30,948 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da 2024-12-07T00:53:30,948 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35125, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:53:30,948 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53567, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:53:30,948 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47271, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:53:30,955 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:30,957 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35431 {}] master.ServerManager(517): Registering regionserver=3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:30,962 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): 
Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,962 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:30,966 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:47114 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:45537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47114 dst: /127.0.0.1:45537 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T00:53:30,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:30,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35431 {}] master.ServerManager(517): Registering regionserver=3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:30,974 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da 2024-12-07T00:53:30,974 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42801 2024-12-07T00:53:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-07T00:53:30,974 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T00:53:30,975 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35431 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:30,976 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da 2024-12-07T00:53:30,976 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35431 {}] master.ServerManager(517): Registering regionserver=3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:30,976 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42801 2024-12-07T00:53:30,976 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T00:53:30,976 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T00:53:30,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:30,980 DEBUG [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da 2024-12-07T00:53:30,981 DEBUG [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42801 2024-12-07T00:53:30,981 DEBUG [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T00:53:30,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T00:53:30,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T00:53:30,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:30,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T00:53:30,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:53:30,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T00:53:30,992 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:30,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T00:53:30,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T00:53:30,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:30,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:30,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T00:53:31,000 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T00:53:31,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:31,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:31,002 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T00:53:31,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740 2024-12-07T00:53:31,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740 2024-12-07T00:53:31,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T00:53:31,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T00:53:31,009 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T00:53:31,011 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T00:53:31,017 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:53:31,018 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65840032, jitterRate=-0.01890707015991211}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:53:31,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733532810977Initializing all the Stores at 1733532810980 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532810980Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532810980Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532810980Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532810980Cleaning up temporary data from old regions at 1733532811008 (+28 ms)Region opened successfully at 1733532811020 (+12 ms) 2024-12-07T00:53:31,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T00:53:31,021 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T00:53:31,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T00:53:31,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T00:53:31,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T00:53:31,022 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T00:53:31,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733532811020Disabling compacts and flushes for region at 1733532811020Disabling writes for close at 1733532811021 (+1 ms)Writing region close event to WAL at 1733532811022 (+1 ms)Closed at 1733532811022 2024-12-07T00:53:31,026 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:31,026 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T00:53:31,027 DEBUG [RS:2;3e92e60d7d96:37867 {}] zookeeper.ZKUtil(111): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:31,028 WARN [RS:2;3e92e60d7d96:37867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:53:31,028 DEBUG [RS:1;3e92e60d7d96:33661 {}] zookeeper.ZKUtil(111): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:31,028 INFO [RS:2;3e92e60d7d96:37867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:53:31,028 WARN [RS:1;3e92e60d7d96:33661 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T00:53:31,028 INFO [RS:1;3e92e60d7d96:33661 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:53:31,028 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:31,028 DEBUG [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:31,029 DEBUG [RS:0;3e92e60d7d96:37517 {}] zookeeper.ZKUtil(111): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,029 WARN [RS:0;3e92e60d7d96:37517 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:53:31,029 INFO [RS:0;3e92e60d7d96:37517 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:53:31,029 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,029 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e92e60d7d96,37517,1733532809656] 2024-12-07T00:53:31,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e92e60d7d96,33661,1733532809752] 2024-12-07T00:53:31,030 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e92e60d7d96,37867,1733532809799] 2024-12-07T00:53:31,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T00:53:31,046 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T00:53:31,050 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T00:53:31,059 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:53:31,059 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:53:31,059 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:53:31,072 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:53:31,072 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.MemStoreFlusher(131): 
globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:53:31,072 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:53:31,077 INFO [RS:0;3e92e60d7d96:37517 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:53:31,077 INFO [RS:2;3e92e60d7d96:37867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:53:31,077 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,077 INFO [RS:1;3e92e60d7d96:33661 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:53:31,077 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,077 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,078 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T00:53:31,078 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T00:53:31,078 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T00:53:31,084 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T00:53:31,084 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T00:53:31,084 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T00:53:31,086 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,086 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,086 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:31,086 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,086 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,086 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,086 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 
2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,087 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:31,088 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:0;3e92e60d7d96:37517 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:31,088 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:31,088 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:31,088 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:31,088 DEBUG [RS:2;3e92e60d7d96:37867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:31,088 DEBUG [RS:1;3e92e60d7d96:33661 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:31,089 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,089 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,089 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,089 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,089 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:31,090 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,37867,1733532809799-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:31,090 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,33661,1733532809752-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:31,091 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,37517,1733532809656-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:31,108 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:53:31,110 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:53:31,110 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,37517,1733532809656-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,110 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,37867,1733532809799-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,110 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,110 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,110 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.Replication(171): 3e92e60d7d96,37517,1733532809656 started 2024-12-07T00:53:31,110 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.Replication(171): 3e92e60d7d96,37867,1733532809799 started 2024-12-07T00:53:31,112 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:53:31,112 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,33661,1733532809752-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,112 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,112 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.Replication(171): 3e92e60d7d96,33661,1733532809752 started 2024-12-07T00:53:31,127 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:31,127 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1482): Serving as 3e92e60d7d96,37517,1733532809656, RpcServer on 3e92e60d7d96/172.17.0.2:37517, sessionid=0x101ad476fa50001 2024-12-07T00:53:31,128 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:53:31,128 DEBUG [RS:0;3e92e60d7d96:37517 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,129 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,37517,1733532809656' 2024-12-07T00:53:31,129 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:53:31,130 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:53:31,130 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:53:31,130 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:53:31,130 DEBUG [RS:0;3e92e60d7d96:37517 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,130 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,37517,1733532809656' 2024-12-07T00:53:31,131 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:53:31,131 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:53:31,132 DEBUG [RS:0;3e92e60d7d96:37517 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:53:31,132 INFO [RS:0;3e92e60d7d96:37517 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:53:31,132 INFO [RS:0;3e92e60d7d96:37517 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:53:31,134 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:31,134 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:31,134 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1482): Serving as 3e92e60d7d96,37867,1733532809799, RpcServer on 3e92e60d7d96/172.17.0.2:37867, sessionid=0x101ad476fa50003 2024-12-07T00:53:31,134 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(1482): Serving as 3e92e60d7d96,33661,1733532809752, RpcServer on 3e92e60d7d96/172.17.0.2:33661, sessionid=0x101ad476fa50002 2024-12-07T00:53:31,134 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:53:31,135 DEBUG [RS:2;3e92e60d7d96:37867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:31,135 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:53:31,135 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,37867,1733532809799' 2024-12-07T00:53:31,135 DEBUG [RS:1;3e92e60d7d96:33661 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:31,135 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:53:31,135 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,33661,1733532809752' 2024-12-07T00:53:31,135 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:53:31,136 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:53:31,136 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:53:31,136 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:53:31,136 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:53:31,136 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:53:31,136 DEBUG [RS:2;3e92e60d7d96:37867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:31,136 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:53:31,136 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,37867,1733532809799' 2024-12-07T00:53:31,136 DEBUG [RS:1;3e92e60d7d96:33661 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:31,137 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:53:31,137 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,33661,1733532809752' 2024-12-07T00:53:31,137 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:53:31,137 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:53:31,138 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:53:31,138 DEBUG [RS:2;3e92e60d7d96:37867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:53:31,138 INFO [RS:2;3e92e60d7d96:37867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:53:31,138 INFO [RS:2;3e92e60d7d96:37867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:53:31,138 DEBUG [RS:1;3e92e60d7d96:33661 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:53:31,138 INFO [RS:1;3e92e60d7d96:33661 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:53:31,138 INFO [RS:1;3e92e60d7d96:33661 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:53:31,201 WARN [3e92e60d7d96:35431 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T00:53:31,236 INFO [RS:0;3e92e60d7d96:37517 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:53:31,239 INFO [RS:2;3e92e60d7d96:37867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:53:31,239 INFO [RS:1;3e92e60d7d96:33661 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:53:31,239 INFO [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C37517%2C1733532809656, suffix=, logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656, archiveDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs, maxLogs=32 2024-12-07T00:53:31,241 INFO [RS:1;3e92e60d7d96:33661 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C33661%2C1733532809752, suffix=, logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,33661,1733532809752, archiveDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs, maxLogs=32 2024-12-07T00:53:31,242 INFO [RS:2;3e92e60d7d96:37867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C37867%2C1733532809799, suffix=, logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37867,1733532809799, archiveDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs, maxLogs=32 2024-12-07T00:53:31,254 DEBUG [RS:0;3e92e60d7d96:37517 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656/3e92e60d7d96%2C37517%2C1733532809656.1733532811241, exclude list is [], retry=0 2024-12-07T00:53:31,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43817,DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5,DISK] 2024-12-07T00:53:31,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45537,DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4,DISK] 2024-12-07T00:53:31,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38851,DS-f8a201ba-efe4-4c94-8283-33163ec8e43b,DISK] 2024-12-07T00:53:31,279 DEBUG [RS:2;3e92e60d7d96:37867 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37867,1733532809799/3e92e60d7d96%2C37867%2C1733532809799.1733532811244, exclude list is [], retry=0 2024-12-07T00:53:31,279 DEBUG [RS:1;3e92e60d7d96:33661 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,33661,1733532809752/3e92e60d7d96%2C33661%2C1733532809752.1733532811244, exclude list is [], retry=0 2024-12-07T00:53:31,279 INFO [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656/3e92e60d7d96%2C37517%2C1733532809656.1733532811241 2024-12-07T00:53:31,280 DEBUG [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42607:42607),(127.0.0.1/127.0.0.1:39827:39827),(127.0.0.1/127.0.0.1:34891:34891)] 2024-12-07T00:53:31,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43817,DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5,DISK] 2024-12-07T00:53:31,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45537,DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4,DISK] 2024-12-07T00:53:31,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43817,DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5,DISK] 2024-12-07T00:53:31,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38851,DS-f8a201ba-efe4-4c94-8283-33163ec8e43b,DISK] 2024-12-07T00:53:31,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:45537,DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4,DISK] 2024-12-07T00:53:31,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38851,DS-f8a201ba-efe4-4c94-8283-33163ec8e43b,DISK] 2024-12-07T00:53:31,295 INFO [RS:1;3e92e60d7d96:33661 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,33661,1733532809752/3e92e60d7d96%2C33661%2C1733532809752.1733532811244 2024-12-07T00:53:31,295 DEBUG [RS:1;3e92e60d7d96:33661 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42607:42607),(127.0.0.1/127.0.0.1:39827:39827),(127.0.0.1/127.0.0.1:34891:34891)] 2024-12-07T00:53:31,298 INFO [RS:2;3e92e60d7d96:37867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37867,1733532809799/3e92e60d7d96%2C37867%2C1733532809799.1733532811244 2024-12-07T00:53:31,299 DEBUG [RS:2;3e92e60d7d96:37867 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42607:42607),(127.0.0.1/127.0.0.1:39827:39827),(127.0.0.1/127.0.0.1:34891:34891)] 2024-12-07T00:53:31,456 DEBUG [3e92e60d7d96:35431 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T00:53:31,465 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(204): Hosts are {3e92e60d7d96=0} racks are {/default-rack=0} 2024-12-07T00:53:31,471 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T00:53:31,472 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T00:53:31,472 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T00:53:31,472 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T00:53:31,472 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T00:53:31,472 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T00:53:31,472 INFO [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T00:53:31,472 INFO [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T00:53:31,472 INFO [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T00:53:31,472 DEBUG [3e92e60d7d96:35431 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:53:31,479 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,485 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e92e60d7d96,37517,1733532809656, state=OPENING 2024-12-07T00:53:31,537 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T00:53:31,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-12-07T00:53:31,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:31,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:31,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:31,548 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,548 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,550 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T00:53:31,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3e92e60d7d96,37517,1733532809656}] 2024-12-07T00:53:31,734 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:53:31,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46919, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:53:31,766 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T00:53:31,766 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:53:31,767 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T00:53:31,771 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C37517%2C1733532809656.meta, suffix=.meta, logDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656, archiveDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs, maxLogs=32 2024-12-07T00:53:31,789 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): 
When create output stream for /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656/3e92e60d7d96%2C37517%2C1733532809656.meta.1733532811773.meta, exclude list is [], retry=0 2024-12-07T00:53:31,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45537,DS-b6ffe55c-cb13-4ccb-9c57-83cf5b3d22b4,DISK] 2024-12-07T00:53:31,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38851,DS-f8a201ba-efe4-4c94-8283-33163ec8e43b,DISK] 2024-12-07T00:53:31,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43817,DS-c806d7d7-e678-46f9-8f77-6db6dbba7be5,DISK] 2024-12-07T00:53:31,797 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/WALs/3e92e60d7d96,37517,1733532809656/3e92e60d7d96%2C37517%2C1733532809656.meta.1733532811773.meta 2024-12-07T00:53:31,798 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34891:34891),(127.0.0.1/127.0.0.1:39827:39827),(127.0.0.1/127.0.0.1:42607:42607)] 2024-12-07T00:53:31,798 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:53:31,800 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T00:53:31,802 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T00:53:31,807 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
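[Editor's note, not part of the captured log] The wal.AbstractFSWAL(613) entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for both the region-server WALs and the hbase:meta WAL. The following is a minimal sketch of the standard HBase settings that those numbers usually come from, assuming the usual key names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs); the values are illustrative and were not read from this test's configuration files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: knobs corresponding to
// "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32".
// rollsize is derived as blocksize * logroll.multiplier (256 MB * 0.5 = 128 MB).
public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("WAL rolls at " + rollSize + " bytes"); // 134217728 = 128 MB
  }
}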
2024-12-07T00:53:31,810 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T00:53:31,810 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:31,811 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T00:53:31,811 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T00:53:31,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T00:53:31,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T00:53:31,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:31,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:31,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T00:53:31,817 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T00:53:31,817 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:31,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:31,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T00:53:31,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T00:53:31,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:31,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:31,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T00:53:31,821 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T00:53:31,821 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:31,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T00:53:31,822 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T00:53:31,824 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740 2024-12-07T00:53:31,827 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740 2024-12-07T00:53:31,830 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T00:53:31,830 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T00:53:31,831 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T00:53:31,833 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T00:53:31,835 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70735287, jitterRate=0.0540379136800766}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:53:31,835 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T00:53:31,836 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733532811811Writing region info on filesystem at 1733532811811Initializing all the Stores at 1733532811813 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532811813Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532811813Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532811813Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532811813Cleaning up temporary data from old regions at 1733532811830 (+17 ms)Running coprocessor post-open hooks at 1733532811835 (+5 ms)Region opened successfully at 1733532811836 (+1 ms) 2024-12-07T00:53:31,842 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733532811727 2024-12-07T00:53:31,852 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T00:53:31,853 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T00:53:31,854 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,856 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e92e60d7d96,37517,1733532809656, state=OPEN 2024-12-07T00:53:31,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:31,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:31,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:31,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:31,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:31,896 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:31,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T00:53:31,904 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3e92e60d7d96,37517,1733532809656 in 344 msec 2024-12-07T00:53:31,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T00:53:31,910 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 869 msec 2024-12-07T00:53:31,912 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:31,912 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T00:53:31,932 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T00:53:31,934 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e92e60d7d96,37517,1733532809656, seqNum=-1] 2024-12-07T00:53:31,951 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:53:31,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49547, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:53:31,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1610 sec 2024-12-07T00:53:31,972 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733532811972, completionTime=-1 2024-12-07T00:53:31,974 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T00:53:31,975 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-07T00:53:31,997 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T00:53:31,997 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733532871997 2024-12-07T00:53:31,998 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733532931997 2024-12-07T00:53:31,998 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 23 msec 2024-12-07T00:53:31,999 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:53:32,005 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35431,1733532808947-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,006 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35431,1733532808947-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,006 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35431,1733532808947-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,007 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3e92e60d7d96:35431, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,008 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,008 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,013 DEBUG [master/3e92e60d7d96:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T00:53:32,041 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.145sec 2024-12-07T00:53:32,043 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T00:53:32,045 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T00:53:32,045 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T00:53:32,046 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T00:53:32,046 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T00:53:32,047 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35431,1733532808947-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:32,047 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35431,1733532808947-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T00:53:32,051 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T00:53:32,052 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T00:53:32,052 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35431,1733532808947-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:32,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296587b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:53:32,155 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T00:53:32,155 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T00:53:32,158 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3e92e60d7d96,35431,-1 for getting cluster id 2024-12-07T00:53:32,160 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T00:53:32,168 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e1a10915-b28d-4097-ad25-390bccf0bb1f' 2024-12-07T00:53:32,170 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T00:53:32,170 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e1a10915-b28d-4097-ad25-390bccf0bb1f" 2024-12-07T00:53:32,171 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@403d01fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:53:32,171 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3e92e60d7d96,35431,-1] 2024-12-07T00:53:32,173 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T00:53:32,175 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:32,175 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-07T00:53:32,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c738163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:53:32,178 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T00:53:32,185 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e92e60d7d96,37517,1733532809656, seqNum=-1] 2024-12-07T00:53:32,185 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:53:32,188 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:53:32,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:32,209 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T00:53:32,213 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:32,215 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3d553205 2024-12-07T00:53:32,216 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T00:53:32,218 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T00:53:32,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:53:32,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T00:53:32,233 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:53:32,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T00:53:32,235 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:32,238 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:53:32,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:32,247 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:32,247 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:32,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:50114 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:38851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50114 dst: /127.0.0.1:38851 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:32,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-07T00:53:32,258 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T00:53:32,261 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4c22b3ff3ef7d941482960f78cdc460e, NAME => 'TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da 2024-12-07T00:53:32,267 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:32,267 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:32,272 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:50128 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:38851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50128 dst: /127.0.0.1:38851 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:32,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-07T00:53:32,277 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
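[Editor's note, not part of the captured log] The DFSStripedOutputStream warnings above occur because these writes use the RS-3-2-1024k erasure coding policy, which needs 3 data + 2 parity block targets (at least 5 datanodes), while this mini cluster runs only three datanodes (127.0.0.1:43817, :45537, :38851). The warning itself suggests `hdfs ec -verifyClusterSetup`; below is a minimal standalone sketch of a roughly equivalent programmatic check, assuming an HDFS client on the classpath and fs.defaultFS pointing at the cluster under test (hdfs://localhost:42801 in this run). The path used is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Sketch only: compares the datanode count against what the EC policy on a
// path requires (RS-3-2-1024k needs 5 datanodes; this test cluster has 3).
public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // fs.defaultFS assumed: hdfs://localhost:42801
    Path dir = new Path("/user/jenkins/test-data"); // illustrative path
    try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      int live = dfs.getDataNodeStats().length;
      if (policy == null) {
        System.out.println("no EC policy set on " + dir);
      } else {
        int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
        System.out.printf("policy %s needs %d datanodes, cluster reports %d%n",
            policy.getName(), needed, live);
      }
    }
  }
}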
2024-12-07T00:53:32,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:32,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 4c22b3ff3ef7d941482960f78cdc460e, disabling compactions & flushes 2024-12-07T00:53:32,278 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. after waiting 0 ms 2024-12-07T00:53:32,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,278 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4c22b3ff3ef7d941482960f78cdc460e: Waiting for close lock at 1733532812278Disabling compacts and flushes for region at 1733532812278Disabling writes for close at 1733532812278Writing region close event to WAL at 1733532812278Closed at 1733532812278 2024-12-07T00:53:32,280 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:53:32,285 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733532812280"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733532812280"}]},"ts":"1733532812280"} 2024-12-07T00:53:32,289 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T00:53:32,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:53:32,294 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733532812291"}]},"ts":"1733532812291"} 2024-12-07T00:53:32,298 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T00:53:32,298 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3e92e60d7d96=0} racks are {/default-rack=0} 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T00:53:32,300 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T00:53:32,300 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T00:53:32,300 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T00:53:32,300 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:53:32,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c22b3ff3ef7d941482960f78cdc460e, ASSIGN}] 2024-12-07T00:53:32,304 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c22b3ff3ef7d941482960f78cdc460e, ASSIGN 2024-12-07T00:53:32,306 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c22b3ff3ef7d941482960f78cdc460e, ASSIGN; state=OFFLINE, location=3e92e60d7d96,37867,1733532809799; forceNewPlan=false, retain=false 2024-12-07T00:53:32,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:32,459 INFO [3e92e60d7d96:35431 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
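At this point the master has queued a TransitRegionStateProcedure (pid=5) to assign the new region, while the client keeps polling "Checking to see if procedure is done pid=4". Later in this log the test waits with a 60000 ms timeout until all regions of TestHBaseWalOnEC are assigned; a hedged sketch of that wait is shown below, assuming HBaseTestingUtil exposes waitUntilAllRegionsAssigned the way its predecessor HBaseTestingUtility does, and with the utility instance passed in rather than taken from the test's (unshown) fields.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

// Illustrative wait matching the "Waiting until all regions ... get assigned.
// Timeout = 60000ms" message later in this log. The method signature is assumed
// to match the older HBaseTestingUtility API.
final class WaitForAssignment {
  static void waitForTable(HBaseTestingUtil util) throws Exception {
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);
  }
}
```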
2024-12-07T00:53:32,460 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c22b3ff3ef7d941482960f78cdc460e, regionState=OPENING, regionLocation=3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:32,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c22b3ff3ef7d941482960f78cdc460e, ASSIGN because future has completed 2024-12-07T00:53:32,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c22b3ff3ef7d941482960f78cdc460e, server=3e92e60d7d96,37867,1733532809799}] 2024-12-07T00:53:32,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:32,619 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:53:32,622 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51417, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:53:32,633 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,634 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c22b3ff3ef7d941482960f78cdc460e, NAME => 'TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:53:32,634 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,634 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:32,634 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,634 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,637 INFO [StoreOpener-4c22b3ff3ef7d941482960f78cdc460e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,639 INFO [StoreOpener-4c22b3ff3ef7d941482960f78cdc460e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c22b3ff3ef7d941482960f78cdc460e columnFamilyName cf 2024-12-07T00:53:32,639 DEBUG [StoreOpener-4c22b3ff3ef7d941482960f78cdc460e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:32,640 INFO [StoreOpener-4c22b3ff3ef7d941482960f78cdc460e-1 {}] regionserver.HStore(327): Store=4c22b3ff3ef7d941482960f78cdc460e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:32,640 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,642 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,642 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,643 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,643 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,646 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,650 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:53:32,651 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4c22b3ff3ef7d941482960f78cdc460e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65089957, jitterRate=-0.03008405864238739}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:53:32,651 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:32,652 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4c22b3ff3ef7d941482960f78cdc460e: Running coprocessor pre-open hook at 1733532812635Writing region info on filesystem at 1733532812635Initializing all the Stores at 1733532812636 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532812636Cleaning up temporary data from old regions at 1733532812643 (+7 ms)Running coprocessor post-open hooks at 1733532812651 (+8 ms)Region opened successfully at 1733532812652 (+1 ms) 2024-12-07T00:53:32,654 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e., pid=6, masterSystemTime=1733532812619 2024-12-07T00:53:32,657 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,657 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:32,659 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c22b3ff3ef7d941482960f78cdc460e, regionState=OPEN, openSeqNum=2, regionLocation=3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:32,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c22b3ff3ef7d941482960f78cdc460e, server=3e92e60d7d96,37867,1733532809799 because future has completed 2024-12-07T00:53:32,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T00:53:32,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4c22b3ff3ef7d941482960f78cdc460e, server=3e92e60d7d96,37867,1733532809799 in 199 msec 2024-12-07T00:53:32,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T00:53:32,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=4c22b3ff3ef7d941482960f78cdc460e, ASSIGN in 366 msec 2024-12-07T00:53:32,673 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:53:32,674 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733532812673"}]},"ts":"1733532812673"} 2024-12-07T00:53:32,676 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T00:53:32,678 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:53:32,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 452 msec 2024-12-07T00:53:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:32,873 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T00:53:32,873 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T00:53:32,875 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:53:32,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-07T00:53:32,882 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:53:32,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T00:53:32,891 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e., hostname=3e92e60d7d96,37867,1733532809799, seqNum=2] 2024-12-07T00:53:32,893 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:53:32,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48982, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:53:32,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T00:53:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T00:53:32,912 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T00:53:32,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T00:53:32,914 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T00:53:32,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T00:53:33,022 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T00:53:33,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T00:53:33,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:33,084 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 4c22b3ff3ef7d941482960f78cdc460e 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T00:53:33,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/.tmp/cf/58a76bca91e14978819173af7ee533a8 is 36, key is row/cf:cq/1733532812896/Put/seqid=0 2024-12-07T00:53:33,138 WARN [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,138 WARN [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1598095451_22 at /127.0.0.1:42620 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:45537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42620 dst: /127.0.0.1:45537 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-07T00:53:33,146 WARN [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:33,146 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/.tmp/cf/58a76bca91e14978819173af7ee533a8 2024-12-07T00:53:33,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/.tmp/cf/58a76bca91e14978819173af7ee533a8 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/cf/58a76bca91e14978819173af7ee533a8 2024-12-07T00:53:33,196 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/cf/58a76bca91e14978819173af7ee533a8, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T00:53:33,204 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 4c22b3ff3ef7d941482960f78cdc460e in 118ms, sequenceid=5, compaction requested=false 2024-12-07T00:53:33,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-07T00:53:33,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 4c22b3ff3ef7d941482960f78cdc460e: 2024-12-07T00:53:33,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 
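The flush just completed wrote a single ~32 B cell (key row/cf:cq, per the HFileWriterImpl line above) into store file 58a76bca91e14978819173af7ee533a8 at sequenceid=5, driven by the master's FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8). A minimal sketch of the corresponding client calls follows; it assumes an already open Connection, and the cell value is a placeholder since the log only records the key and the data size.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative client side of the put + flush visible in this log.
// 'conn' is an assumed open Connection; the value bytes are a placeholder.
public final class PutAndFlush {
  static void putAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
      table.put(put);     // lands in the memstore of region 4c22b3ff3ef7d941482960f78cdc460e
      admin.flush(name);  // triggers FlushTableProcedure / FlushRegionProcedure (pid=7/8)
    }
  }
}
```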
2024-12-07T00:53:33,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T00:53:33,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T00:53:33,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T00:53:33,217 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 297 msec 2024-12-07T00:53:33,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 312 msec 2024-12-07T00:53:33,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35431 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T00:53:33,233 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T00:53:33,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T00:53:33,247 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T00:53:33,247 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:33,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,251 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T00:53:33,251 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T00:53:33,251 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=233511752, stopped=false 2024-12-07T00:53:33,252 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3e92e60d7d96,35431,1733532808947 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, 
quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:33,272 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T00:53:33,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:33,273 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T00:53:33,273 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:33,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:33,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:33,273 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:33,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:33,274 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e92e60d7d96,33661,1733532809752' ***** 2024-12-07T00:53:33,274 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-12-07T00:53:33,274 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e92e60d7d96,37517,1733532809656' ***** 2024-12-07T00:53:33,275 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T00:53:33,275 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:53:33,276 INFO [RS:0;3e92e60d7d96:37517 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:53:33,276 INFO [RS:0;3e92e60d7d96:37517 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
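The call stacks logged by AsyncConnectionImpl(264) show the teardown path: the JUnit @After method TestHBaseWalOnEC.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection, asks the master to shut down, and then stops the three region servers seen above. A minimal sketch of that shape is given below; the field name and its initialization are assumptions, since the log only shows the tearDown and shutdownMiniCluster frames.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Minimal sketch of the teardown path visible in the logged call stacks.
// The UTIL field and its construction are assumed; only tearDown() calling
// HBaseTestingUtil.shutdownMiniCluster() is confirmed by this log.
public class TearDownSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Shuts down the HBase mini cluster (master + region servers),
    // then the backing mini DFS and ZooKeeper clusters.
    UTIL.shutdownMiniCluster();
  }
}
```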
2024-12-07T00:53:33,276 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e92e60d7d96,37867,1733532809799' ***** 2024-12-07T00:53:33,276 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(959): stopping server 3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:33,276 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T00:53:33,276 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T00:53:33,276 INFO [RS:0;3e92e60d7d96:37517 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3e92e60d7d96:37517. 2024-12-07T00:53:33,276 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T00:53:33,277 DEBUG [RS:0;3e92e60d7d96:37517 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:33,277 DEBUG [RS:0;3e92e60d7d96:37517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,277 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:53:33,277 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T00:53:33,277 INFO [RS:2;3e92e60d7d96:37867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:53:33,277 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T00:53:33,277 INFO [RS:2;3e92e60d7d96:37867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T00:53:33,277 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:53:33,277 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T00:53:33,278 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(3091): Received CLOSE for 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:33,278 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T00:53:33,278 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T00:53:33,278 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(959): stopping server 3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:33,278 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:53:33,278 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T00:53:33,278 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T00:53:33,279 INFO [RS:2;3e92e60d7d96:37867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3e92e60d7d96:37867. 2024-12-07T00:53:33,279 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T00:53:33,279 DEBUG [RS:2;3e92e60d7d96:37867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:33,279 DEBUG [RS:2;3e92e60d7d96:37867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,279 DEBUG [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T00:53:33,279 INFO [RS:1;3e92e60d7d96:33661 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-07T00:53:33,279 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4c22b3ff3ef7d941482960f78cdc460e, disabling compactions & flushes 2024-12-07T00:53:33,279 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T00:53:33,279 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1325): Online Regions={4c22b3ff3ef7d941482960f78cdc460e=TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e.} 2024-12-07T00:53:33,279 INFO [RS:1;3e92e60d7d96:33661 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T00:53:33,279 INFO [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:33,279 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(959): stopping server 3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:33,279 DEBUG [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1351): Waiting on 4c22b3ff3ef7d941482960f78cdc460e 2024-12-07T00:53:33,279 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:33,279 INFO [RS:1;3e92e60d7d96:33661 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3e92e60d7d96:33661. 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 
after waiting 0 ms 2024-12-07T00:53:33,279 DEBUG [RS:1;3e92e60d7d96:33661 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:33,279 DEBUG [RS:1;3e92e60d7d96:33661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,279 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:33,280 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T00:53:33,280 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(976): stopping server 3e92e60d7d96,33661,1733532809752; all regions closed. 
2024-12-07T00:53:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_1073741828_1018 (size=93) 2024-12-07T00:53:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_1073741828_1018 (size=93) 2024-12-07T00:53:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741828_1018 (size=93) 2024-12-07T00:53:33,292 INFO [regionserver/3e92e60d7d96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:33,292 INFO [regionserver/3e92e60d7d96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:33,293 DEBUG [RS:1;3e92e60d7d96:33661 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs 2024-12-07T00:53:33,294 INFO [RS:1;3e92e60d7d96:33661 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e92e60d7d96%2C33661%2C1733532809752:(num 1733532811244) 2024-12-07T00:53:33,294 DEBUG [RS:1;3e92e60d7d96:33661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,294 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:33,294 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:33,294 INFO [regionserver/3e92e60d7d96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:33,294 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.ChoreService(370): Chore service for: regionserver/3e92e60d7d96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:33,294 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T00:53:33,295 INFO [regionserver/3e92e60d7d96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T00:53:33,295 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:53:33,295 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:53:33,295 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:33,295 INFO [RS:1;3e92e60d7d96:33661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33661 2024-12-07T00:53:33,302 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/default/TestHBaseWalOnEC/4c22b3ff3ef7d941482960f78cdc460e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T00:53:33,304 INFO [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 
2024-12-07T00:53:33,304 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4c22b3ff3ef7d941482960f78cdc460e: Waiting for close lock at 1733532813279Running coprocessor pre-close hooks at 1733532813279Disabling compacts and flushes for region at 1733532813279Disabling writes for close at 1733532813279Writing region close event to WAL at 1733532813281 (+2 ms)Running coprocessor post-close hooks at 1733532813303 (+22 ms)Closed at 1733532813304 (+1 ms) 2024-12-07T00:53:33,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e92e60d7d96,33661,1733532809752 2024-12-07T00:53:33,305 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T00:53:33,305 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e. 2024-12-07T00:53:33,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:53:33,310 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/info/78f8937fa99741edaaac2ef1f3e29c43 is 153, key is TestHBaseWalOnEC,,1733532812220.4c22b3ff3ef7d941482960f78cdc460e./info:regioninfo/1733532812658/Put/seqid=0 2024-12-07T00:53:33,313 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,313 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,316 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e92e60d7d96,33661,1733532809752] 2024-12-07T00:53:33,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1165214945_22 at /127.0.0.1:60394 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60394 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-07T00:53:33,322 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:33,322 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/info/78f8937fa99741edaaac2ef1f3e29c43 2024-12-07T00:53:33,326 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e92e60d7d96,33661,1733532809752 already deleted, retry=false 2024-12-07T00:53:33,326 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e92e60d7d96,33661,1733532809752 expired; onlineServers=2 2024-12-07T00:53:33,347 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/ns/710f5b9623e34490a9e4d7c43396ada2 is 43, key is default/ns:d/1733532811957/Put/seqid=0 2024-12-07T00:53:33,349 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,350 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T00:53:33,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1165214945_22 at /127.0.0.1:60422 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60422 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-07T00:53:33,357 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:33,358 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/ns/710f5b9623e34490a9e4d7c43396ada2 2024-12-07T00:53:33,382 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/table/40a33a7cfad0462eaf53289622b0e171 is 52, key is TestHBaseWalOnEC/table:state/1733532812673/Put/seqid=0 2024-12-07T00:53:33,384 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,385 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,388 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1165214945_22 at /127.0.0.1:50136 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:38851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50136 dst: /127.0.0.1:38851 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-07T00:53:33,392 WARN [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
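The repeated DFSStripedOutputStream warnings above follow directly from the erasure coding policy they name: RS-3-2-1024k stripes each block group across 3 data cells plus 2 parity cells, so it needs at least 5 DataNodes, while this mini cluster runs only 3 (127.0.0.1:38851, :43817 and :45537, per the addStoredBlock lines and numDataNodes=3 later in the log). Parity indices 3 and 4 therefore have no node to land on, the writer reports the block group as "at high risk of losing data", and the DataXceiver "Premature EOF from inputStream" errors appear consistent with the DataNode-side view of those aborted striped writes. The log itself suggests running 'hdfs ec -verifyClusterSetup'; the following is only a minimal Java sketch along the same lines, assuming fs.defaultFS resolves to the test NameNode (hdfs://localhost:42801) and using the standard DistributedFileSystem API; the path queried is illustrative, not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EcSetupCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // picks up fs.defaultFS from the loaded *-site.xml
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
      // Erasure coding policy in effect on the directory being written (illustrative path).
      System.out.println("policy: "
          + dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data")));
      // Compare live DataNodes against the 3 data + 2 parity cells RS-3-2-1024k requires.
      int live = dfs.getDataNodeStats().length;
      System.out.println("live DataNodes: " + live + " (RS-3-2-1024k wants at least 5)");
    }
  }
}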
2024-12-07T00:53:33,392 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/table/40a33a7cfad0462eaf53289622b0e171 2024-12-07T00:53:33,401 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/info/78f8937fa99741edaaac2ef1f3e29c43 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/info/78f8937fa99741edaaac2ef1f3e29c43 2024-12-07T00:53:33,411 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/info/78f8937fa99741edaaac2ef1f3e29c43, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T00:53:33,412 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/ns/710f5b9623e34490a9e4d7c43396ada2 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/ns/710f5b9623e34490a9e4d7c43396ada2 2024-12-07T00:53:33,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:33,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33661-0x101ad476fa50002, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:33,417 INFO [RS:1;3e92e60d7d96:33661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:33,417 INFO [RS:1;3e92e60d7d96:33661 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e92e60d7d96,33661,1733532809752; zookeeper connection closed. 
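The "Committing .../.tmp/info/... as .../info/..." lines above, followed by "Added ..., entries=10, sequenceid=11, filesize=6.5 K", are the final step of the meta flush: the HFile is first written under the region's .tmp directory and then moved into the column family directory. Conceptually that commit is a single HDFS rename (the real logic lives in HRegionFileSystem and adds retries and bookkeeping); below is only a hedged illustration using the plain FileSystem API, with the paths copied from the log purely as an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedHFile {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());  // hdfs://localhost:42801 in this test run
    Path tmp = new Path("/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/"
        + "data/hbase/meta/1588230740/.tmp/info/78f8937fa99741edaaac2ef1f3e29c43");
    Path committed = new Path("/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/"
        + "data/hbase/meta/1588230740/info/78f8937fa99741edaaac2ef1f3e29c43");
    // An HDFS rename is atomic within one namespace, so readers never observe a half-committed file.
    boolean ok = fs.rename(tmp, committed);
    System.out.println("committed=" + ok);
  }
}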
2024-12-07T00:53:33,418 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3f59ca88 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3f59ca88 2024-12-07T00:53:33,422 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/ns/710f5b9623e34490a9e4d7c43396ada2, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T00:53:33,423 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/.tmp/table/40a33a7cfad0462eaf53289622b0e171 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/table/40a33a7cfad0462eaf53289622b0e171 2024-12-07T00:53:33,433 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/table/40a33a7cfad0462eaf53289622b0e171, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T00:53:33,435 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 156ms, sequenceid=11, compaction requested=false 2024-12-07T00:53:33,435 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T00:53:33,445 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T00:53:33,446 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T00:53:33,446 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T00:53:33,446 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733532813278Running coprocessor pre-close hooks at 1733532813279 (+1 ms)Disabling compacts and flushes for region at 1733532813279Disabling writes for close at 1733532813279Obtaining lock to block concurrent updates at 1733532813280 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733532813280Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733532813281 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733532813282 (+1 ms)Flushing 1588230740/info: creating writer at 1733532813282Flushing 1588230740/info: appending metadata at 1733532813308 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733532813308Flushing 1588230740/ns: creating writer at 
1733532813332 (+24 ms)Flushing 1588230740/ns: appending metadata at 1733532813346 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733532813346Flushing 1588230740/table: creating writer at 1733532813366 (+20 ms)Flushing 1588230740/table: appending metadata at 1733532813381 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733532813381Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@607953eb: reopening flushed file at 1733532813400 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f528742: reopening flushed file at 1733532813411 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60bc0e0a: reopening flushed file at 1733532813422 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 156ms, sequenceid=11, compaction requested=false at 1733532813435 (+13 ms)Writing region close event to WAL at 1733532813438 (+3 ms)Running coprocessor post-close hooks at 1733532813446 (+8 ms)Closed at 1733532813446 2024-12-07T00:53:33,447 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T00:53:33,479 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(976): stopping server 3e92e60d7d96,37517,1733532809656; all regions closed. 2024-12-07T00:53:33,479 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(976): stopping server 3e92e60d7d96,37867,1733532809799; all regions closed. 2024-12-07T00:53:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_1073741827_1017 (size=1298) 2024-12-07T00:53:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_1073741827_1017 (size=1298) 2024-12-07T00:53:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741827_1017 (size=1298) 2024-12-07T00:53:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741829_1019 (size=2751) 2024-12-07T00:53:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_1073741829_1019 (size=2751) 2024-12-07T00:53:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_1073741829_1019 (size=2751) 2024-12-07T00:53:33,486 DEBUG [RS:2;3e92e60d7d96:37867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs 2024-12-07T00:53:33,486 INFO [RS:2;3e92e60d7d96:37867 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e92e60d7d96%2C37867%2C1733532809799:(num 1733532811244) 2024-12-07T00:53:33,486 DEBUG [RS:2;3e92e60d7d96:37867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,486 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:33,486 DEBUG [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs 2024-12-07T00:53:33,486 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:33,486 INFO [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(1259): Closed WAL: 
AsyncFSWAL 3e92e60d7d96%2C37517%2C1733532809656.meta:.meta(num 1733532811773) 2024-12-07T00:53:33,487 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.ChoreService(370): Chore service for: regionserver/3e92e60d7d96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:33,487 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T00:53:33,487 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:53:33,487 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:53:33,487 INFO [regionserver/3e92e60d7d96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T00:53:33,487 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:33,487 INFO [RS:2;3e92e60d7d96:37867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37867 2024-12-07T00:53:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_1073741826_1016 (size=93) 2024-12-07T00:53:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741826_1016 (size=93) 2024-12-07T00:53:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_1073741826_1016 (size=93) 2024-12-07T00:53:33,493 DEBUG [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/oldWALs 2024-12-07T00:53:33,493 INFO [RS:0;3e92e60d7d96:37517 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 3e92e60d7d96%2C37517%2C1733532809656:(num 1733532811241) 2024-12-07T00:53:33,493 DEBUG [RS:0;3e92e60d7d96:37517 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:33,493 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:33,493 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:33,493 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.ChoreService(370): Chore service for: regionserver/3e92e60d7d96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:33,494 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:33,494 INFO [regionserver/3e92e60d7d96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T00:53:33,494 INFO [RS:0;3e92e60d7d96:37517 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37517 2024-12-07T00:53:33,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e92e60d7d96,37867,1733532809799 2024-12-07T00:53:33,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:53:33,495 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T00:53:33,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e92e60d7d96,37517,1733532809656 2024-12-07T00:53:33,505 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T00:53:33,505 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e92e60d7d96,37517,1733532809656] 2024-12-07T00:53:33,526 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e92e60d7d96,37517,1733532809656 already deleted, retry=false 2024-12-07T00:53:33,526 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e92e60d7d96,37517,1733532809656 expired; onlineServers=1 2024-12-07T00:53:33,526 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e92e60d7d96,37867,1733532809799] 2024-12-07T00:53:33,537 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e92e60d7d96,37867,1733532809799 already deleted, retry=false 2024-12-07T00:53:33,537 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e92e60d7d96,37867,1733532809799 expired; onlineServers=0 2024-12-07T00:53:33,537 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3e92e60d7d96,35431,1733532808947' ***** 2024-12-07T00:53:33,537 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T00:53:33,537 INFO [M:0;3e92e60d7d96:35431 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T00:53:33,538 INFO [M:0;3e92e60d7d96:35431 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:33,538 DEBUG [M:0;3e92e60d7d96:35431 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T00:53:33,538 DEBUG [M:0;3e92e60d7d96:35431 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T00:53:33,538 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T00:53:33,538 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.small.0-1733532810932 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.small.0-1733532810932,5,FailOnTimeoutGroup] 2024-12-07T00:53:33,538 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.large.0-1733532810922 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.large.0-1733532810922,5,FailOnTimeoutGroup] 2024-12-07T00:53:33,539 INFO [M:0;3e92e60d7d96:35431 {}] hbase.ChoreService(370): Chore service for: master/3e92e60d7d96:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:33,539 INFO [M:0;3e92e60d7d96:35431 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:33,539 DEBUG [M:0;3e92e60d7d96:35431 {}] master.HMaster(1795): Stopping service threads 2024-12-07T00:53:33,539 INFO [M:0;3e92e60d7d96:35431 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T00:53:33,540 INFO [M:0;3e92e60d7d96:35431 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T00:53:33,541 INFO [M:0;3e92e60d7d96:35431 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T00:53:33,542 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T00:53:33,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:33,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:33,547 DEBUG [M:0;3e92e60d7d96:35431 {}] zookeeper.ZKUtil(347): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T00:53:33,547 WARN [M:0;3e92e60d7d96:35431 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T00:53:33,548 INFO [M:0;3e92e60d7d96:35431 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/.lastflushedseqids 2024-12-07T00:53:33,558 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,559 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T00:53:33,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:42628 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:45537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42628 dst: /127.0.0.1:45537 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775584_1033 (size=137) 2024-12-07T00:53:33,565 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:33,565 INFO [M:0;3e92e60d7d96:35431 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T00:53:33,566 INFO [M:0;3e92e60d7d96:35431 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T00:53:33,566 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T00:53:33,566 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:33,566 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:33,566 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T00:53:33,566 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T00:53:33,566 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-12-07T00:53:33,585 DEBUG [M:0;3e92e60d7d96:35431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab943ead7499413dad22d29ae8c2fef9 is 82, key is hbase:meta,,1/info:regioninfo/1733532811854/Put/seqid=0 2024-12-07T00:53:33,587 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,587 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:60444 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60444 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-07T00:53:33,595 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T00:53:33,596 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab943ead7499413dad22d29ae8c2fef9 2024-12-07T00:53:33,616 INFO [RS:2;3e92e60d7d96:37867 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:33,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:33,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37867-0x101ad476fa50003, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:33,616 INFO [RS:2;3e92e60d7d96:37867 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e92e60d7d96,37867,1733532809799; zookeeper connection closed. 2024-12-07T00:53:33,616 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3752ef8a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3752ef8a 2024-12-07T00:53:33,621 DEBUG [M:0;3e92e60d7d96:35431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bdcb0b0d0b7842cfbd09144f63b57998 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733532812679/Put/seqid=0 2024-12-07T00:53:33,623 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,623 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,626 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:50160 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:38851:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50160 dst: /127.0.0.1:38851 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:33,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37517-0x101ad476fa50001, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:33,626 INFO [RS:0;3e92e60d7d96:37517 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:33,626 INFO [RS:0;3e92e60d7d96:37517 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e92e60d7d96,37517,1733532809656; zookeeper connection closed. 2024-12-07T00:53:33,627 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68cb1787 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68cb1787 2024-12-07T00:53:33,628 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T00:53:33,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775552_1037 (size=6437) 2024-12-07T00:53:33,631 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-07T00:53:33,631 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bdcb0b0d0b7842cfbd09144f63b57998 2024-12-07T00:53:33,654 DEBUG [M:0;3e92e60d7d96:35431 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b7471b50ae1418c98102619d2e19003 is 69, key is 3e92e60d7d96,33661,1733532809752/rs:state/1733532810976/Put/seqid=0 2024-12-07T00:53:33,657 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-07T00:53:33,657 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-07T00:53:33,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-512590360_22 at /127.0.0.1:60460 [Receiving block BP-50809909-172.17.0.2-1733532804262:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:43817:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60460 dst: /127.0.0.1:43817 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:53:33,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-07T00:53:33,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-07T00:53:33,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-07T00:53:33,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-07T00:53:33,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-07T00:53:33,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-07T00:53:33,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-07T00:53:33,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-07T00:53:33,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-07T00:53:34,066 WARN [M:0;3e92e60d7d96:35431 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-07T00:53:34,067 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b7471b50ae1418c98102619d2e19003 2024-12-07T00:53:34,080 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ab943ead7499413dad22d29ae8c2fef9 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab943ead7499413dad22d29ae8c2fef9 2024-12-07T00:53:34,087 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ab943ead7499413dad22d29ae8c2fef9, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T00:53:34,089 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bdcb0b0d0b7842cfbd09144f63b57998 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bdcb0b0d0b7842cfbd09144f63b57998 2024-12-07T00:53:34,096 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bdcb0b0d0b7842cfbd09144f63b57998, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T00:53:34,098 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b7471b50ae1418c98102619d2e19003 as hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b7471b50ae1418c98102619d2e19003 2024-12-07T00:53:34,106 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b7471b50ae1418c98102619d2e19003, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T00:53:34,107 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 541ms, sequenceid=72, compaction requested=false 2024-12-07T00:53:34,109 INFO [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T00:53:34,109 DEBUG [M:0;3e92e60d7d96:35431 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733532813566Disabling compacts and flushes for region at 1733532813566Disabling writes for close at 1733532813566Obtaining lock to block concurrent updates at 1733532813566Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733532813566Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1733532813567 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733532813567Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733532813568 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733532813584 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733532813584Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733532813604 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733532813620 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733532813620Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733532813639 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733532813654 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733532813654Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68f9eab2: reopening flushed file at 1733532814079 (+425 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42d257ad: reopening flushed file at 1733532814087 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d8bd946: reopening flushed file at 1733532814096 (+9 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 541ms, sequenceid=72, compaction requested=false at 1733532814107 (+11 ms)Writing region close event to WAL at 1733532814109 (+2 ms)Closed at 1733532814109 2024-12-07T00:53:34,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38851 is added to blk_1073741825_1011 (size=32653) 2024-12-07T00:53:34,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45537 is added to blk_1073741825_1011 (size=32653) 2024-12-07T00:53:34,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43817 is added to blk_1073741825_1011 (size=32653) 2024-12-07T00:53:34,113 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T00:53:34,114 INFO [M:0;3e92e60d7d96:35431 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-07T00:53:34,114 INFO [M:0;3e92e60d7d96:35431 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35431 2024-12-07T00:53:34,114 INFO [M:0;3e92e60d7d96:35431 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T00:53:34,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:34,258 INFO [M:0;3e92e60d7d96:35431 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:34,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35431-0x101ad476fa50000, quorum=127.0.0.1:60194, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:34,304 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:34,308 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:53:34,308 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:53:34,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T00:53:34,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,STOPPED} 2024-12-07T00:53:34,313 WARN [BP-50809909-172.17.0.2-1733532804262 heartbeating to localhost/127.0.0.1:42801 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T00:53:34,313 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T00:53:34,313 WARN [BP-50809909-172.17.0.2-1733532804262 heartbeating to localhost/127.0.0.1:42801 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-50809909-172.17.0.2-1733532804262 (Datanode Uuid 0ff54060-3a02-49a5-954a-34ba79a54e5a) service to localhost/127.0.0.1:42801 2024-12-07T00:53:34,313 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T00:53:34,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data5/current/BP-50809909-172.17.0.2-1733532804262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T00:53:34,315 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data6/current/BP-50809909-172.17.0.2-1733532804262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T00:53:34,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T00:53:34,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:34,318 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:53:34,318 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:53:34,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T00:53:34,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,STOPPED} 2024-12-07T00:53:34,319 WARN [BP-50809909-172.17.0.2-1733532804262 heartbeating to localhost/127.0.0.1:42801 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T00:53:34,319 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T00:53:34,319 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T00:53:34,319 WARN [BP-50809909-172.17.0.2-1733532804262 heartbeating to localhost/127.0.0.1:42801 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-50809909-172.17.0.2-1733532804262 (Datanode Uuid 72911d0a-641e-4672-a815-3706d7302373) service to localhost/127.0.0.1:42801 2024-12-07T00:53:34,320 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data3/current/BP-50809909-172.17.0.2-1733532804262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T00:53:34,320 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data4/current/BP-50809909-172.17.0.2-1733532804262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T00:53:34,320 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T00:53:34,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:34,325 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:53:34,325 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:53:34,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T00:53:34,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,STOPPED} 2024-12-07T00:53:34,327 WARN [BP-50809909-172.17.0.2-1733532804262 heartbeating to localhost/127.0.0.1:42801 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T00:53:34,327 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T00:53:34,327 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T00:53:34,327 WARN [BP-50809909-172.17.0.2-1733532804262 heartbeating to localhost/127.0.0.1:42801 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-50809909-172.17.0.2-1733532804262 (Datanode Uuid fa30874e-e4de-4639-8071-c548b624a18b) service to localhost/127.0.0.1:42801 2024-12-07T00:53:34,328 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data1/current/BP-50809909-172.17.0.2-1733532804262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T00:53:34,328 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/cluster_896df829-bae6-ec42-2bcd-fe7410eb13d9/data/data2/current/BP-50809909-172.17.0.2-1733532804262 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T00:53:34,328 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T00:53:34,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T00:53:34,338 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:53:34,338 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:53:34,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T00:53:34,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir/,STOPPED} 2024-12-07T00:53:34,347 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T00:53:34,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T00:53:34,379 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 157), OpenFileDescriptor=441 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=225 (was 212) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7793 (was 8050) 2024-12-07T00:53:34,384 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=441, MaxFileDescriptor=1048576, SystemLoadAverage=225, ProcessCount=11, AvailableMemoryMB=7793 2024-12-07T00:53:34,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.log.dir so I do NOT create it in target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/bc1e14a5-c6bf-5647-1085-7e073694bd39/hadoop.tmp.dir so I do NOT create it in target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b, deleteOnExit=true 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/test.cache.data in system properties and HBase conf 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir in system properties and HBase conf 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T00:53:34,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T00:53:34,386 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:53:34,386 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:53:34,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T00:53:34,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/nfs.dump.dir in system properties and HBase conf 2024-12-07T00:53:34,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/java.io.tmpdir in system properties and HBase conf 2024-12-07T00:53:34,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:53:34,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T00:53:34,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T00:53:34,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:34,754 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:34,755 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:34,755 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:34,755 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:53:34,756 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:34,756 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f854cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:34,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16eaa68d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:34,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ffa125c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/java.io.tmpdir/jetty-localhost-36913-hadoop-hdfs-3_4_1-tests_jar-_-any-3016949992216224155/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T00:53:34,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aa18531{HTTP/1.1, (http/1.1)}{localhost:36913} 2024-12-07T00:53:34,848 INFO [Time-limited test {}] server.Server(415): Started @12233ms 2024-12-07T00:53:35,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:35,149 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:35,150 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:35,150 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:35,150 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:53:35,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@137179d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:35,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61d23bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:35,239 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@700f39d7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/java.io.tmpdir/jetty-localhost-41857-hadoop-hdfs-3_4_1-tests_jar-_-any-11093247706984462004/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:35,239 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@4e9ae4fc{HTTP/1.1, (http/1.1)}{localhost:41857} 2024-12-07T00:53:35,239 INFO [Time-limited test {}] server.Server(415): Started @12625ms 2024-12-07T00:53:35,241 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:53:35,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:35,282 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:35,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:35,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:35,283 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:53:35,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c597470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:35,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e5afbc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:35,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f8d2ee2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/java.io.tmpdir/jetty-localhost-41159-hadoop-hdfs-3_4_1-tests_jar-_-any-13620946254918159596/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:35,376 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6beabb01{HTTP/1.1, (http/1.1)}{localhost:41159} 2024-12-07T00:53:35,376 INFO [Time-limited test {}] server.Server(415): Started @12762ms 2024-12-07T00:53:35,378 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:53:35,410 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:53:35,413 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:53:35,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:53:35,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:53:35,414 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:53:35,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73f6422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:53:35,417 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c77de1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:53:35,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e89cb0b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/java.io.tmpdir/jetty-localhost-45775-hadoop-hdfs-3_4_1-tests_jar-_-any-5347647707348424539/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:35,509 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b3c8c82{HTTP/1.1, (http/1.1)}{localhost:45775} 2024-12-07T00:53:35,509 INFO [Time-limited test {}] server.Server(415): Started @12894ms 2024-12-07T00:53:35,510 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:53:36,441 WARN [Thread-559 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data1/current/BP-871738338-172.17.0.2-1733532814410/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:36,441 WARN [Thread-560 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data2/current/BP-871738338-172.17.0.2-1733532814410/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:36,457 WARN [Thread-500 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T00:53:36,459 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb1b5de9a1744189 with lease ID 0x21f5d7af08456dd6: Processing first storage report for DS-03cb5916-4406-412d-b3f6-ffcb49464079 from datanode DatanodeRegistration(127.0.0.1:41121, datanodeUuid=efb9f2e9-030f-4934-beb2-301326d5da03, infoPort=38655, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410) 2024-12-07T00:53:36,459 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb1b5de9a1744189 with lease ID 0x21f5d7af08456dd6: from storage DS-03cb5916-4406-412d-b3f6-ffcb49464079 node DatanodeRegistration(127.0.0.1:41121, datanodeUuid=efb9f2e9-030f-4934-beb2-301326d5da03, infoPort=38655, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:36,459 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xeb1b5de9a1744189 with lease ID 0x21f5d7af08456dd6: Processing first storage report for DS-a05014d8-e12a-4475-b0c3-0623b493af82 from datanode DatanodeRegistration(127.0.0.1:41121, datanodeUuid=efb9f2e9-030f-4934-beb2-301326d5da03, infoPort=38655, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410) 2024-12-07T00:53:36,460 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb1b5de9a1744189 with lease ID 0x21f5d7af08456dd6: from storage DS-a05014d8-e12a-4475-b0c3-0623b493af82 node DatanodeRegistration(127.0.0.1:41121, datanodeUuid=efb9f2e9-030f-4934-beb2-301326d5da03, infoPort=38655, infoSecurePort=0, ipcPort=42825, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:36,815 WARN [Thread-571 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data3/current/BP-871738338-172.17.0.2-1733532814410/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:36,815 WARN [Thread-572 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data4/current/BP-871738338-172.17.0.2-1733532814410/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:36,830 WARN [Thread-523 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T00:53:36,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58d31f9077de384a with lease ID 0x21f5d7af08456dd7: Processing first storage report for DS-57bb2a22-2d0e-42d4-a142-09b127200264 from datanode DatanodeRegistration(127.0.0.1:37499, datanodeUuid=2290421d-9a84-465e-986a-079573315aa1, infoPort=34583, infoSecurePort=0, ipcPort=43491, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410) 2024-12-07T00:53:36,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58d31f9077de384a with lease ID 0x21f5d7af08456dd7: from storage DS-57bb2a22-2d0e-42d4-a142-09b127200264 node DatanodeRegistration(127.0.0.1:37499, datanodeUuid=2290421d-9a84-465e-986a-079573315aa1, infoPort=34583, infoSecurePort=0, ipcPort=43491, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T00:53:36,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x58d31f9077de384a with lease ID 0x21f5d7af08456dd7: Processing first storage report for DS-bf1522b0-5fdd-493e-bccf-812c30511e1a from datanode DatanodeRegistration(127.0.0.1:37499, datanodeUuid=2290421d-9a84-465e-986a-079573315aa1, infoPort=34583, infoSecurePort=0, ipcPort=43491, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410) 2024-12-07T00:53:36,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x58d31f9077de384a with lease ID 0x21f5d7af08456dd7: from storage DS-bf1522b0-5fdd-493e-bccf-812c30511e1a node DatanodeRegistration(127.0.0.1:37499, datanodeUuid=2290421d-9a84-465e-986a-079573315aa1, infoPort=34583, infoSecurePort=0, ipcPort=43491, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:36,848 WARN [Thread-582 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data5/current/BP-871738338-172.17.0.2-1733532814410/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:36,848 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data6/current/BP-871738338-172.17.0.2-1733532814410/current, will proceed with Du for space computation calculation, 2024-12-07T00:53:36,866 WARN [Thread-545 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T00:53:36,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6f7243b13028ce3 with lease ID 0x21f5d7af08456dd8: Processing first storage report for DS-24aead55-3d82-467a-af96-051ccb923d93 from datanode DatanodeRegistration(127.0.0.1:39827, datanodeUuid=f1fefc16-19c2-40a3-9dba-eb9db4a16d4a, infoPort=45005, infoSecurePort=0, ipcPort=39771, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410) 2024-12-07T00:53:36,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6f7243b13028ce3 with lease ID 0x21f5d7af08456dd8: from storage DS-24aead55-3d82-467a-af96-051ccb923d93 node DatanodeRegistration(127.0.0.1:39827, datanodeUuid=f1fefc16-19c2-40a3-9dba-eb9db4a16d4a, infoPort=45005, infoSecurePort=0, ipcPort=39771, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:36,869 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6f7243b13028ce3 with lease ID 0x21f5d7af08456dd8: Processing first storage report for DS-589d6f28-bc9d-4270-8650-c1d2867c4f56 from datanode DatanodeRegistration(127.0.0.1:39827, datanodeUuid=f1fefc16-19c2-40a3-9dba-eb9db4a16d4a, infoPort=45005, infoSecurePort=0, ipcPort=39771, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410) 2024-12-07T00:53:36,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6f7243b13028ce3 with lease ID 0x21f5d7af08456dd8: from storage DS-589d6f28-bc9d-4270-8650-c1d2867c4f56 node DatanodeRegistration(127.0.0.1:39827, datanodeUuid=f1fefc16-19c2-40a3-9dba-eb9db4a16d4a, infoPort=45005, infoSecurePort=0, ipcPort=39771, storageInfo=lv=-57;cid=testClusterID;nsid=218703700;c=1733532814410), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:53:36,956 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5 2024-12-07T00:53:36,961 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/zookeeper_0, clientPort=53037, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T00:53:36,963 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53037 2024-12-07T00:53:36,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:36,966 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741825_1001 (size=7) 2024-12-07T00:53:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741825_1001 (size=7) 2024-12-07T00:53:36,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741825_1001 (size=7) 2024-12-07T00:53:36,982 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6 with version=8 2024-12-07T00:53:36,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42801/user/jenkins/test-data/ca3e439d-0993-450f-7236-e58b9ba487da/hbase-staging 2024-12-07T00:53:36,984 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:36,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:36,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:36,985 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:36,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:36,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:36,985 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T00:53:36,985 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:36,985 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35309 2024-12-07T00:53:36,987 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35309 connecting to ZooKeeper ensemble=127.0.0.1:53037 2024-12-07T00:53:37,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:353090x0, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:37,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35309-0x101ad4791eb0000 connected 2024-12-07T00:53:37,148 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,153 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:37,160 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6, hbase.cluster.distributed=false 2024-12-07T00:53:37,162 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:37,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35309 2024-12-07T00:53:37,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35309 2024-12-07T00:53:37,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35309 2024-12-07T00:53:37,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35309 2024-12-07T00:53:37,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35309 2024-12-07T00:53:37,178 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:37,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,178 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:37,178 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,179 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:37,179 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:53:37,179 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:37,179 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39091 2024-12-07T00:53:37,181 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39091 connecting to ZooKeeper ensemble=127.0.0.1:53037 2024-12-07T00:53:37,182 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,183 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390910x0, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:37,199 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:37,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39091-0x101ad4791eb0001 connected 2024-12-07T00:53:37,199 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:53:37,200 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:53:37,201 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:53:37,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:37,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39091 2024-12-07T00:53:37,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39091 2024-12-07T00:53:37,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39091 2024-12-07T00:53:37,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39091 2024-12-07T00:53:37,205 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39091 2024-12-07T00:53:37,221 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:53:37,222 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:37,223 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36425 2024-12-07T00:53:37,224 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36425 connecting to ZooKeeper ensemble=127.0.0.1:53037 2024-12-07T00:53:37,225 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364250x0, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:37,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36425-0x101ad4791eb0002 connected 2024-12-07T00:53:37,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:37,243 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:53:37,243 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:53:37,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:53:37,245 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:37,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36425 2024-12-07T00:53:37,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36425 2024-12-07T00:53:37,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36425 2024-12-07T00:53:37,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36425 2024-12-07T00:53:37,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36425 2024-12-07T00:53:37,264 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3e92e60d7d96:0 server-side Connection retries=45 2024-12-07T00:53:37,264 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,264 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,264 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:53:37,264 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:53:37,264 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:53:37,264 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:53:37,265 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:53:37,265 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46203 2024-12-07T00:53:37,266 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46203 connecting to ZooKeeper ensemble=127.0.0.1:53037 2024-12-07T00:53:37,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462030x0, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:53:37,284 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46203-0x101ad4791eb0003 connected 2024-12-07T00:53:37,284 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:37,285 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:53:37,285 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:53:37,286 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:53:37,288 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:53:37,288 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46203 2024-12-07T00:53:37,288 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46203 2024-12-07T00:53:37,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46203 2024-12-07T00:53:37,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46203 2024-12-07T00:53:37,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46203 2024-12-07T00:53:37,301 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3e92e60d7d96:35309 2024-12-07T00:53:37,301 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,315 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,317 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:37,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,327 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T00:53:37,327 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3e92e60d7d96,35309,1733532816984 from backup master directory 2024-12-07T00:53:37,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,336 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T00:53:37,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:53:37,336 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,342 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/hbase.id] with ID: 2f86050c-bc3c-4857-92da-3237eca1ba25 2024-12-07T00:53:37,342 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/.tmp/hbase.id 2024-12-07T00:53:37,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741826_1002 (size=42) 2024-12-07T00:53:37,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741826_1002 (size=42) 2024-12-07T00:53:37,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741826_1002 (size=42) 2024-12-07T00:53:37,352 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/.tmp/hbase.id]:[hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/hbase.id] 2024-12-07T00:53:37,370 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:53:37,370 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T00:53:37,372 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-12-07T00:53:37,377 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T00:53:37,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T00:53:37,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741827_1003 (size=196) 2024-12-07T00:53:37,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741827_1003 (size=196) 2024-12-07T00:53:37,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741827_1003 (size=196) 2024-12-07T00:53:37,412 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:53:37,412 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T00:53:37,413 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T00:53:37,425 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741828_1004 (size=1189) 2024-12-07T00:53:37,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741828_1004 (size=1189) 2024-12-07T00:53:37,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741828_1004 (size=1189) 2024-12-07T00:53:37,427 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store 2024-12-07T00:53:37,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741829_1005 (size=34) 2024-12-07T00:53:37,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741829_1005 (size=34) 2024-12-07T00:53:37,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741829_1005 (size=34) 2024-12-07T00:53:37,437 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:37,437 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T00:53:37,437 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
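
A few records back, FSUtils(620/625/634) creates the cluster ID file by first writing .tmp/hbase.id and then moving it to the final hbase.id path, so a reader never observes a half-written file. Below is a minimal sketch of that publish-by-rename pattern against the Hadoop FileSystem API, reusing the fs.defaultFS, test-data directory and cluster ID from the log; the class name and overwrite flag are illustrative.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:39499"); // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6");
        Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // 1. Write the ID to a temporary location first.
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write("2f86050c-bc3c-4857-92da-3237eca1ba25".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Publish it by renaming into place; HDFS rename is atomic from the reader's view.
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("Could not move " + tmpId + " to " + finalId);
        }
      }
    }
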
2024-12-07T00:53:37,437 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:37,437 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T00:53:37,437 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:37,437 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:37,438 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733532817437Disabling compacts and flushes for region at 1733532817437Disabling writes for close at 1733532817437Writing region close event to WAL at 1733532817437Closed at 1733532817437 2024-12-07T00:53:37,439 WARN [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/.initializing 2024-12-07T00:53:37,439 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/WALs/3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,443 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C35309%2C1733532816984, suffix=, logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/WALs/3e92e60d7d96,35309,1733532816984, archiveDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/oldWALs, maxLogs=10 2024-12-07T00:53:37,444 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e92e60d7d96%2C35309%2C1733532816984.1733532817443 2024-12-07T00:53:37,454 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/WALs/3e92e60d7d96,35309,1733532816984/3e92e60d7d96%2C35309%2C1733532816984.1733532817443 2024-12-07T00:53:37,459 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38655:38655),(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:45005:45005)] 2024-12-07T00:53:37,460 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:53:37,460 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:37,461 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,461 DEBUG 
[master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T00:53:37,466 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:37,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T00:53:37,469 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
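
Each CompactionConfiguration(183) record above lists the knobs behind minor-compaction file selection: a candidate window needs at least minFilesToCompact=3 and at most maxFilesToCompact=10 files, and no file may be larger than ratio (1.2 here) times the combined size of the files it would be compacted with. The sketch below is a deliberately simplified, self-contained version of that ratio rule, not HBase's ExploringCompactionPolicy; the constants are copied from the log.

    import java.util.List;

    public class RatioSelectionSketch {
      static final int MIN_FILES = 3;    // minFilesToCompact from the log
      static final int MAX_FILES = 10;   // maxFilesToCompact from the log
      static final double RATIO = 1.2;   // ratio 1.200000 from the log

      /** Returns true if this window of store-file sizes passes the ratio check. */
      static boolean eligible(List<Long> sizes) {
        if (sizes.size() < MIN_FILES || sizes.size() > MAX_FILES) {
          return false;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          // A file is only re-written when it is not much bigger than its peers:
          // size <= RATIO * (sum of the other files in the window).
          if (size > RATIO * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        System.out.println(eligible(List.of(10L, 12L, 11L)));  // true: similar sizes
        System.out.println(eligible(List.of(100L, 5L, 6L)));   // false: one file dominates
      }
    }
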
2024-12-07T00:53:37,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T00:53:37,473 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:37,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T00:53:37,475 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:37,476 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 
2024-12-07T00:53:37,477 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,477 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,479 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,479 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,479 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T00:53:37,481 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:53:37,483 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:53:37,484 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64637626, jitterRate=-0.036824315786361694}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:53:37,484 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733532817461Initializing all the Stores at 1733532817462 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532817462Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532817464 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532817464Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532817464Cleaning up temporary data from old regions at 1733532817479 (+15 ms)Region opened successfully at 1733532817484 (+5 ms) 2024-12-07T00:53:37,486 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T00:53:37,491 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2461dd02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:37,492 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T00:53:37,492 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T00:53:37,492 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T00:53:37,493 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T00:53:37,493 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T00:53:37,494 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T00:53:37,494 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T00:53:37,498 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
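
Two of the numbers above can be reproduced by hand. FlushLargeStoresPolicy(65) falls back to the memstore flush size divided by the number of families, i.e. 128 MB / 4 families of master:store = 32 MB, matching flushSizeLowerBound=33554432. And HRegion(1114) reports desiredMaxFileSize=64637626 with jitterRate=-0.0368...: that is a 64 MB (67108864-byte) region max file size scaled by (1 + jitterRate), so regions do not all hit the split threshold at the same instant. The worked sketch below assumes those base values (128 MB flush size, 64 MB max file size, jitter drawn from roughly ±0.25); they are consistent with the log but not read from this cluster's configuration.

    import java.util.concurrent.ThreadLocalRandom;

    public class SplitAndFlushMathSketch {
      public static void main(String[] args) {
        // Flush lower bound fallback: memstore flush size divided by the number of families.
        long memstoreFlushSize = 128L * 1024 * 1024;   // 134217728, as logged
        int families = 4;                              // info, proc, rs, state
        System.out.println("flush lower bound = " + memstoreFlushSize / families); // 33554432

        // Jittered split threshold: base size scaled by (1 + jitterRate).
        long baseMaxFileSize = 64L * 1024 * 1024;      // 67108864 (assumed default)
        double loggedJitterRate = -0.036824315786361694;  // value from the log line
        System.out.println("desiredMaxFileSize = "
            + Math.round(baseMaxFileSize * (1.0 + loggedJitterRate)));  // ~64637626

        // A fresh jitterRate would be drawn uniformly from roughly [-0.25, +0.25].
        double newJitterRate = ThreadLocalRandom.current().nextDouble(-0.25, 0.25);
        System.out.println("a new jittered size = "
            + Math.round(baseMaxFileSize * (1.0 + newJitterRate)));
      }
    }
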
2024-12-07T00:53:37,499 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T00:53:37,526 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T00:53:37,526 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T00:53:37,527 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T00:53:37,536 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T00:53:37,537 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T00:53:37,538 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T00:53:37,547 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T00:53:37,548 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T00:53:37,557 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T00:53:37,560 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T00:53:37,568 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T00:53:37,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:37,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:37,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:37,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-07T00:53:37,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,579 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3e92e60d7d96,35309,1733532816984, sessionid=0x101ad4791eb0000, setting cluster-up flag (Was=false) 2024-12-07T00:53:37,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,631 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T00:53:37,632 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:37,684 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T00:53:37,686 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:37,689 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T00:53:37,693 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(746): ClusterId : 2f86050c-bc3c-4857-92da-3237eca1ba25 2024-12-07T00:53:37,693 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:53:37,693 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(746): ClusterId : 2f86050c-bc3c-4857-92da-3237eca1ba25 2024-12-07T00:53:37,694 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:53:37,694 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(746): ClusterId : 2f86050c-bc3c-4857-92da-3237eca1ba25 2024-12-07T00:53:37,694 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:37,694 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:53:37,695 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T00:53:37,695 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
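
The StochasticLoadBalancer(272) record lists the cost functions it will combine (region count skew, locality, read/write request load, memstore and store-file size, and so on); the balancer's overall score is a multiplier-weighted combination of those per-function costs, and a candidate move is only kept when it lowers the score. The sketch below shows only that weighted-cost idea in plain Java; the weights and cost values are made up, and the real balancer additionally normalises each cost to [0, 1] and runs a randomised search over moves.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class WeightedCostSketch {
      public static void main(String[] args) {
        // cost-function name -> {multiplier, cost in [0,1]}; names echo the log, values are illustrative.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500, 0.10});
        costs.put("MoveCostFunction",            new double[] {  7, 0.02});
        costs.put("ServerLocalityCostFunction",  new double[] { 25, 0.40});
        costs.put("StoreFileCostFunction",       new double[] {  5, 0.15});

        double weighted = 0, totalMultiplier = 0;
        for (double[] mc : costs.values()) {
          weighted += mc[0] * mc[1];
          totalMultiplier += mc[0];
        }
        // Scaling by the sum of multipliers keeps plans comparable regardless of how
        // many cost functions are enabled.
        System.out.println("overall cost = " + (weighted / totalMultiplier));
      }
    }
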
2024-12-07T00:53:37,695 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3e92e60d7d96,35309,1733532816984 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T00:53:37,716 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:53:37,716 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:53:37,716 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:53:37,716 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:37,716 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:53:37,716 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:53:37,716 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:53:37,716 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:37,716 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:37,717 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:53:37,717 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3e92e60d7d96:0, corePoolSize=10, maxPoolSize=10 2024-12-07T00:53:37,717 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,717 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:37,717 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733532847718 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T00:53:37,718 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,719 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T00:53:37,719 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T00:53:37,719 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T00:53:37,720 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:37,720 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T00:53:37,720 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T00:53:37,720 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T00:53:37,720 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.large.0-1733532817720,5,FailOnTimeoutGroup] 2024-12-07T00:53:37,720 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.small.0-1733532817720,5,FailOnTimeoutGroup] 2024-12-07T00:53:37,720 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,720 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T00:53:37,721 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,721 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
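
ChoreService(168) above schedules the recurring maintenance tasks: LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms. The sketch below reproduces the same scheduling shape with the JDK's ScheduledExecutorService rather than HBase's ChoreService/ScheduledChore classes; the task body is a placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);

        Runnable logsCleaner = () -> {
          // Placeholder body: scan the oldWALs directory and delete entries past their TTL.
          System.out.println("LogsCleaner chore ran at " + System.currentTimeMillis());
        };

        // period=600000, unit=MILLISECONDS -- the cadence logged for LogsCleaner/HFileCleaner.
        chorePool.scheduleAtFixedRate(logsCleaner, 600_000, 600_000, TimeUnit.MILLISECONDS);
      }
    }
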
2024-12-07T00:53:37,721 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,722 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T00:53:37,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741831_1007 (size=1321) 2024-12-07T00:53:37,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741831_1007 (size=1321) 2024-12-07T00:53:37,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741831_1007 (size=1321) 2024-12-07T00:53:37,734 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T00:53:37,734 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6 2024-12-07T00:53:37,737 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:53:37,737 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:53:37,737 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:53:37,737 DEBUG [RS:0;3e92e60d7d96:39091 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a76baaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:37,737 DEBUG [RS:2;3e92e60d7d96:46203 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f8e971a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:37,737 DEBUG [RS:1;3e92e60d7d96:36425 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dec806, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3e92e60d7d96/172.17.0.2:0 2024-12-07T00:53:37,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741832_1008 (size=32) 2024-12-07T00:53:37,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741832_1008 (size=32) 2024-12-07T00:53:37,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741832_1008 (size=32) 2024-12-07T00:53:37,751 DEBUG [RS:0;3e92e60d7d96:39091 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3e92e60d7d96:39091 2024-12-07T00:53:37,751 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T00:53:37,751 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T00:53:37,751 DEBUG [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(832): About to register with Master. 
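
FSTableDescriptors(156) and HRegion(7572) above build the hbase:meta descriptor with the info, ns, rep_barrier and table families. hbase:meta itself is created internally by the master, but the same family attributes can be expressed through the public client API, assuming the HBase 2.x+ TableDescriptorBuilder/ColumnFamilyDescriptorBuilder classes. In the sketch below, 'demo' is a hypothetical table name; the 'info' family settings mirror the ones printed above (3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                // VERSIONS => '3'
                .setInMemory(true)                                // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                           // BLOCKSIZE => 8 KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .build();
        System.out.println(td);
      }
    }
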
2024-12-07T00:53:37,752 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3e92e60d7d96:36425 2024-12-07T00:53:37,752 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T00:53:37,752 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T00:53:37,752 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T00:53:37,752 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e92e60d7d96,35309,1733532816984 with port=39091, startcode=1733532817178 2024-12-07T00:53:37,752 DEBUG [RS:0;3e92e60d7d96:39091 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:53:37,753 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e92e60d7d96,35309,1733532816984 with port=36425, startcode=1733532817221 2024-12-07T00:53:37,753 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;3e92e60d7d96:46203 2024-12-07T00:53:37,753 DEBUG [RS:1;3e92e60d7d96:36425 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:53:37,753 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T00:53:37,753 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T00:53:37,753 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(832): About to register with Master. 
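
The reportForDuty records identify each regionserver by host, port and a "startcode", and every server name in this log has the form host,port,startcode (for example 3e92e60d7d96,39091,1733532817178). The startcode is the process start time in epoch milliseconds, which is what lets the master tell a restarted server apart from its previous incarnation on the same host and port; ShutdownHook(81) additionally installs a per-server JVM shutdown hook. A tiny plain-Java sketch of both ideas, with the host and port copied from the log:

    public class ServerIdentitySketch {
      public static void main(String[] args) {
        String host = "3e92e60d7d96";   // hostname from the log
        int port = 39091;               // RS:0's RPC port from the log
        long startcode = System.currentTimeMillis();

        // host,port,startcode -- the server-name format seen throughout this log.
        String serverName = host + "," + port + "," + startcode;
        System.out.println("reportForDuty as " + serverName);

        // A shutdown hook comparable to Shutdownhook:RS:0;... gives the server a chance
        // to close regions and flush state before the JVM exits.
        Runtime.getRuntime().addShutdownHook(
            new Thread(() -> System.out.println("shutting down " + serverName),
                "Shutdownhook:" + serverName));
      }
    }
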
2024-12-07T00:53:37,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:37,754 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(2659): reportForDuty to master=3e92e60d7d96,35309,1733532816984 with port=46203, startcode=1733532817264 2024-12-07T00:53:37,754 DEBUG [RS:2;3e92e60d7d96:46203 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:53:37,755 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57255, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:53:37,755 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51003, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:53:37,756 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33155, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:53:37,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T00:53:37,756 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35309 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:37,756 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35309 {}] master.ServerManager(517): Registering regionserver=3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:37,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T00:53:37,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35309 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:37,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35309 {}] master.ServerManager(517): Registering regionserver=3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:37,759 DEBUG [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6 2024-12-07T00:53:37,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:37,759 DEBUG [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39499 2024-12-07T00:53:37,759 DEBUG [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T00:53:37,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T00:53:37,761 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T00:53:37,761 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35309 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:37,761 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,761 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35309 {}] master.ServerManager(517): Registering regionserver=3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:37,761 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6 2024-12-07T00:53:37,761 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39499 2024-12-07T00:53:37,761 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T00:53:37,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:37,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T00:53:37,763 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6 2024-12-07T00:53:37,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T00:53:37,764 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39499 2024-12-07T00:53:37,764 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T00:53:37,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:37,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T00:53:37,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T00:53:37,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:37,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:37,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T00:53:37,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 
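
The NodeChildrenChanged on /hbase/rs at the end of the record above is the master's watch firing as the regionservers register: each live server publishes an ephemeral znode under /hbase/rs, and ZooKeeper removes that node automatically if the server's session dies. A minimal sketch of such a registration with the plain ZooKeeper client follows; the node data (empty) and ACL choice are illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class RsRegistrationSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53037", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Ephemeral: ZooKeeper deletes the znode when this session expires, which is how
        // the master's watch on /hbase/rs notices a crashed regionserver.
        zk.create("/hbase/rs/3e92e60d7d96,39091,1733532817178",
            new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE,
            CreateMode.EPHEMERAL);
      }
    }
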
2024-12-07T00:53:37,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740 2024-12-07T00:53:37,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740 2024-12-07T00:53:37,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T00:53:37,770 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T00:53:37,771 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T00:53:37,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T00:53:37,775 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:53:37,775 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68388067, jitterRate=0.019061610102653503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:53:37,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733532817754Initializing all the Stores at 1733532817755 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532817755Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532817755Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532817755Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532817755Cleaning up temporary data from old regions at 1733532817770 (+15 ms)Region opened successfully at 1733532817776 (+6 ms) 2024-12-07T00:53:37,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, 
disabling compactions & flushes 2024-12-07T00:53:37,776 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T00:53:37,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T00:53:37,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T00:53:37,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T00:53:37,777 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T00:53:37,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733532817776Disabling compacts and flushes for region at 1733532817776Disabling writes for close at 1733532817776Writing region close event to WAL at 1733532817777 (+1 ms)Closed at 1733532817777 2024-12-07T00:53:37,778 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:37,778 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T00:53:37,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T00:53:37,780 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T00:53:37,782 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T00:53:37,840 DEBUG [RS:0;3e92e60d7d96:39091 {}] zookeeper.ZKUtil(111): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:37,841 WARN [RS:0;3e92e60d7d96:39091 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T00:53:37,841 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e92e60d7d96,46203,1733532817264] 2024-12-07T00:53:37,841 INFO [RS:0;3e92e60d7d96:39091 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T00:53:37,841 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e92e60d7d96,36425,1733532817221] 2024-12-07T00:53:37,841 DEBUG [RS:2;3e92e60d7d96:46203 {}] zookeeper.ZKUtil(111): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:37,841 DEBUG [RS:1;3e92e60d7d96:36425 {}] zookeeper.ZKUtil(111): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:37,841 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3e92e60d7d96,39091,1733532817178] 2024-12-07T00:53:37,841 DEBUG [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:37,841 WARN [RS:1;3e92e60d7d96:36425 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:53:37,841 WARN [RS:2;3e92e60d7d96:46203 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:53:37,842 INFO [RS:2;3e92e60d7d96:46203 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T00:53:37,842 INFO [RS:1;3e92e60d7d96:36425 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T00:53:37,842 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:37,842 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:37,848 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:53:37,848 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:53:37,848 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:53:37,850 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:53:37,851 INFO [RS:0;3e92e60d7d96:39091 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:53:37,851 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,851 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T00:53:37,852 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:53:37,852 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T00:53:37,853 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,853 INFO [RS:2;3e92e60d7d96:46203 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,853 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 
2024-12-07T00:53:37,854 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:37,854 DEBUG [RS:0;3e92e60d7d96:39091 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:37,856 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T00:53:37,857 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:53:37,857 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T00:53:37,857 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,857 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,857 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SNAPSHOT_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:37,858 DEBUG [RS:2;3e92e60d7d96:46203 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:37,859 INFO [RS:1;3e92e60d7d96:36425 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:53:37,860 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T00:53:37,860 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,39091,1733532817178-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,860 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,46203,1733532817264-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:37,861 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T00:53:37,861 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3e92e60d7d96:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,861 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,862 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,862 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,862 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,862 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3e92e60d7d96:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:53:37,862 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:37,862 DEBUG [RS:1;3e92e60d7d96:36425 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:53:37,862 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,862 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,862 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,862 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:37,862 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,862 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,36425,1733532817221-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:37,873 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:53:37,873 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:53:37,873 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,39091,1733532817178-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,873 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,46203,1733532817264-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,873 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,873 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,873 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.Replication(171): 3e92e60d7d96,46203,1733532817264 started 2024-12-07T00:53:37,873 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.Replication(171): 3e92e60d7d96,39091,1733532817178 started 2024-12-07T00:53:37,874 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:53:37,874 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,36425,1733532817221-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,874 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,875 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.Replication(171): 3e92e60d7d96,36425,1733532817221 started 2024-12-07T00:53:37,885 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:37,885 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
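The regionserver.MemStoreFlusher(131) lines above report globalMemStoreLimit=880 M with a low mark of 836 M. Those two figures are normally derived from the region server heap and two fractions, hbase.regionserver.global.memstore.size (default 0.4) and hbase.regionserver.global.memstore.size.lower.limit (default 0.95); 836/880 = 0.95 is consistent with the defaults. A minimal sketch of that arithmetic follows; the roughly 2.2 GB heap is inferred from the 880 M figure and is not printed anywhere in the log.

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long maxHeapBytes = 2200L * 1024 * 1024;   // inferred heap size, illustration only
        double globalFraction = 0.4;               // hbase.regionserver.global.memstore.size (default)
        double lowerLimitFraction = 0.95;          // hbase.regionserver.global.memstore.size.lower.limit (default)
        long globalLimit = (long) (maxHeapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimitFraction);
        // Prints ~880 MB and ~836 MB, matching the MemStoreFlusher(131) log lines.
        System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n", globalLimit >> 20, lowMark >> 20);
      }
    }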
2024-12-07T00:53:37,885 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1482): Serving as 3e92e60d7d96,46203,1733532817264, RpcServer on 3e92e60d7d96/172.17.0.2:46203, sessionid=0x101ad4791eb0003 2024-12-07T00:53:37,885 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(1482): Serving as 3e92e60d7d96,39091,1733532817178, RpcServer on 3e92e60d7d96/172.17.0.2:39091, sessionid=0x101ad4791eb0001 2024-12-07T00:53:37,885 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:53:37,885 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:53:37,885 DEBUG [RS:2;3e92e60d7d96:46203 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:37,885 DEBUG [RS:0;3e92e60d7d96:39091 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:37,885 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,46203,1733532817264' 2024-12-07T00:53:37,885 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,39091,1733532817178' 2024-12-07T00:53:37,886 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:53:37,886 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:53:37,886 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T00:53:37,886 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1482): Serving as 3e92e60d7d96,36425,1733532817221, RpcServer on 3e92e60d7d96/172.17.0.2:36425, sessionid=0x101ad4791eb0002 2024-12-07T00:53:37,886 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:53:37,886 DEBUG [RS:1;3e92e60d7d96:36425 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:37,886 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,36425,1733532817221' 2024-12-07T00:53:37,886 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:53:37,886 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:53:37,886 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:53:37,887 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:53:37,887 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:53:37,887 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:53:37,887 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:53:37,887 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:53:37,887 DEBUG [RS:2;3e92e60d7d96:46203 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:37,887 DEBUG [RS:0;3e92e60d7d96:39091 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:37,887 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,46203,1733532817264' 2024-12-07T00:53:37,887 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,39091,1733532817178' 2024-12-07T00:53:37,887 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:53:37,887 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:53:37,887 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:53:37,887 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:53:37,887 DEBUG [RS:1;3e92e60d7d96:36425 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:37,887 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3e92e60d7d96,36425,1733532817221' 2024-12-07T00:53:37,887 DEBUG 
[RS:1;3e92e60d7d96:36425 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:53:37,887 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:53:37,887 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:53:37,888 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:53:37,888 DEBUG [RS:2;3e92e60d7d96:46203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:53:37,888 INFO [RS:2;3e92e60d7d96:46203 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:53:37,888 INFO [RS:2;3e92e60d7d96:46203 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:53:37,888 DEBUG [RS:0;3e92e60d7d96:39091 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:53:37,888 INFO [RS:0;3e92e60d7d96:39091 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:53:37,888 INFO [RS:0;3e92e60d7d96:39091 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:53:37,888 DEBUG [RS:1;3e92e60d7d96:36425 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:53:37,888 INFO [RS:1;3e92e60d7d96:36425 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:53:37,888 INFO [RS:1;3e92e60d7d96:36425 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:53:37,932 WARN [3e92e60d7d96:35309 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-07T00:53:37,993 INFO [RS:2;3e92e60d7d96:46203 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C46203%2C1733532817264, suffix=, logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,46203,1733532817264, archiveDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs, maxLogs=32 2024-12-07T00:53:37,993 INFO [RS:0;3e92e60d7d96:39091 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C39091%2C1733532817178, suffix=, logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,39091,1733532817178, archiveDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs, maxLogs=32 2024-12-07T00:53:37,993 INFO [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C36425%2C1733532817221, suffix=, logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,36425,1733532817221, archiveDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs, maxLogs=32 2024-12-07T00:53:37,998 INFO [RS:0;3e92e60d7d96:39091 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e92e60d7d96%2C39091%2C1733532817178.1733532817998 2024-12-07T00:53:37,998 INFO [RS:2;3e92e60d7d96:46203 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e92e60d7d96%2C46203%2C1733532817264.1733532817998 2024-12-07T00:53:37,998 INFO [RS:1;3e92e60d7d96:36425 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e92e60d7d96%2C36425%2C1733532817221.1733532817998 2024-12-07T00:53:38,010 INFO [RS:2;3e92e60d7d96:46203 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,46203,1733532817264/3e92e60d7d96%2C46203%2C1733532817264.1733532817998 2024-12-07T00:53:38,010 INFO [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,36425,1733532817221/3e92e60d7d96%2C36425%2C1733532817221.1733532817998 2024-12-07T00:53:38,010 INFO [RS:0;3e92e60d7d96:39091 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,39091,1733532817178/3e92e60d7d96%2C39091%2C1733532817178.1733532817998 2024-12-07T00:53:38,011 DEBUG [RS:2;3e92e60d7d96:46203 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38655:38655),(127.0.0.1/127.0.0.1:45005:45005),(127.0.0.1/127.0.0.1:34583:34583)] 2024-12-07T00:53:38,012 DEBUG [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45005:45005),(127.0.0.1/127.0.0.1:38655:38655),(127.0.0.1/127.0.0.1:34583:34583)] 2024-12-07T00:53:38,012 DEBUG [RS:0;3e92e60d7d96:39091 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:45005:45005),(127.0.0.1/127.0.0.1:38655:38655)] 2024-12-07T00:53:38,182 DEBUG [3e92e60d7d96:35309 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T00:53:38,183 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(204): Hosts are {3e92e60d7d96=0} racks are {/default-rack=0} 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T00:53:38,187 INFO [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T00:53:38,187 INFO [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T00:53:38,187 INFO [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T00:53:38,187 DEBUG [3e92e60d7d96:35309 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:53:38,188 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:38,191 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e92e60d7d96,36425,1733532817221, state=OPENING 2024-12-07T00:53:38,210 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T00:53:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:38,297 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,298 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T00:53:38,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,298 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=3e92e60d7d96,36425,1733532817221}] 2024-12-07T00:53:38,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,454 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:53:38,456 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36689, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:53:38,461 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T00:53:38,461 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T00:53:38,464 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3e92e60d7d96%2C36425%2C1733532817221.meta, suffix=.meta, logDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,36425,1733532817221, archiveDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs, maxLogs=32 2024-12-07T00:53:38,465 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3e92e60d7d96%2C36425%2C1733532817221.meta.1733532818465.meta 2024-12-07T00:53:38,475 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/WALs/3e92e60d7d96,36425,1733532817221/3e92e60d7d96%2C36425%2C1733532817221.meta.1733532818465.meta 2024-12-07T00:53:38,481 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:45005:45005),(127.0.0.1/127.0.0.1:38655:38655)] 2024-12-07T00:53:38,482 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:53:38,483 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T00:53:38,483 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T00:53:38,483 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
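The wal.AbstractFSWAL(613) "WAL configuration" lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) and the FSHLogProvider instantiation are usually governed by the stock WAL properties sketched below. This is a hedged orientation only: when hbase.regionserver.hlog.blocksize is left unset the block size may instead be inherited from the underlying filesystem, and the log does not show which path this test took.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                    // selects FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5F);    // rollsize = blocksize * 0.5
        conf.setInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5F));
        System.out.println("rollsize bytes = " + rollSize);              // 134217728, i.e. 128 MB
      }
    }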
2024-12-07T00:53:38,483 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T00:53:38,483 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:38,483 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T00:53:38,483 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T00:53:38,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T00:53:38,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T00:53:38,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:38,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:38,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T00:53:38,489 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T00:53:38,489 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:38,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:38,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T00:53:38,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T00:53:38,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:38,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:53:38,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T00:53:38,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T00:53:38,493 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:38,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T00:53:38,494 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T00:53:38,494 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740 2024-12-07T00:53:38,496 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740 2024-12-07T00:53:38,498 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T00:53:38,498 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T00:53:38,498 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T00:53:38,500 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T00:53:38,501 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71344657, jitterRate=0.06311823427677155}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:53:38,501 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T00:53:38,502 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733532818484Writing region info on filesystem at 1733532818484Initializing all the Stores at 1733532818485 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532818485Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532818485Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532818485Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733532818485Cleaning up temporary data from old regions at 1733532818498 (+13 ms)Running coprocessor post-open hooks at 1733532818501 (+3 ms)Region opened successfully at 1733532818502 (+1 ms) 2024-12-07T00:53:38,504 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733532818454 2024-12-07T00:53:38,507 DEBUG [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T00:53:38,508 INFO [RS_OPEN_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T00:53:38,509 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:38,510 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3e92e60d7d96,36425,1733532817221, state=OPEN 2024-12-07T00:53:38,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:38,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:38,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:38,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:53:38,515 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3e92e60d7d96,36425,1733532817221 2024-12-07T00:53:38,515 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,515 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,515 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,515 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:53:38,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T00:53:38,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3e92e60d7d96,36425,1733532817221 in 217 msec 2024-12-07T00:53:38,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T00:53:38,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 742 msec 2024-12-07T00:53:38,526 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:53:38,526 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T00:53:38,528 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T00:53:38,528 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e92e60d7d96,36425,1733532817221, seqNum=-1] 2024-12-07T00:53:38,528 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:53:38,531 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58049, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:53:38,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 846 msec 2024-12-07T00:53:38,540 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733532818540, completionTime=-1 2024-12-07T00:53:38,540 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T00:53:38,540 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
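By this point InitMetaProcedure (pid=1) has finished and has created the 'default' and 'hbase' namespaces, and the master starts joining the cluster. A minimal client-side sketch for confirming those namespaces exist, hedged: it assumes the long-standing Admin#listNamespaceDescriptors() call is still available in this 4.0.0-alpha build, and the connection boilerplate is illustrative rather than taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml / ZK quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // After master initialization the 'default' and 'hbase' namespaces should be listed.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }
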
2024-12-07T00:53:38,542 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-07T00:53:38,542 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733532878542 2024-12-07T00:53:38,542 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733532938542 2024-12-07T00:53:38,542 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T00:53:38,543 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:53:38,543 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35309,1733532816984-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,543 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35309,1733532816984-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,543 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35309,1733532816984-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,543 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3e92e60d7d96:35309, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,544 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,544 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,547 DEBUG [master/3e92e60d7d96:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T00:53:38,549 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.213sec 2024-12-07T00:53:38,549 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T00:53:38,550 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T00:53:38,550 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T00:53:38,550 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T00:53:38,550 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T00:53:38,550 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35309,1733532816984-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:53:38,550 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35309,1733532816984-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T00:53:38,553 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T00:53:38,553 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T00:53:38,553 INFO [master/3e92e60d7d96:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3e92e60d7d96,35309,1733532816984-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:53:38,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a77301a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:53:38,593 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3e92e60d7d96,35309,-1 for getting cluster id 2024-12-07T00:53:38,594 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T00:53:38,595 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2f86050c-bc3c-4857-92da-3237eca1ba25' 2024-12-07T00:53:38,596 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T00:53:38,596 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2f86050c-bc3c-4857-92da-3237eca1ba25" 2024-12-07T00:53:38,597 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e095e12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:53:38,597 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3e92e60d7d96,35309,-1] 2024-12-07T00:53:38,597 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T00:53:38,598 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:38,601 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56948, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T00:53:38,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10f5e381, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:53:38,604 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T00:53:38,607 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3e92e60d7d96,36425,1733532817221, seqNum=-1] 2024-12-07T00:53:38,608 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:53:38,610 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:53:38,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:38,613 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T00:53:38,615 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:38,615 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3dfff4b6 2024-12-07T00:53:38,615 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T00:53:38,617 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T00:53:38,618 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:53:38,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-07T00:53:38,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:53:38,622 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:38,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-07T00:53:38,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:38,624 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:53:38,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741837_1013 (size=392) 2024-12-07T00:53:38,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741837_1013 (size=392) 2024-12-07T00:53:38,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741837_1013 (size=392) 2024-12-07T00:53:38,637 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a80cb3034cf8a805dcd59208be4b7944, NAME => 'TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6 2024-12-07T00:53:38,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741838_1014 (size=51) 2024-12-07T00:53:38,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741838_1014 (size=51) 2024-12-07T00:53:38,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741838_1014 (size=51) 2024-12-07T00:53:38,653 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:38,653 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing a80cb3034cf8a805dcd59208be4b7944, disabling compactions & flushes 2024-12-07T00:53:38,653 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:38,653 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:38,653 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. after waiting 0 ms 2024-12-07T00:53:38,653 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:38,653 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 
2024-12-07T00:53:38,653 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for a80cb3034cf8a805dcd59208be4b7944: Waiting for close lock at 1733532818653Disabling compacts and flushes for region at 1733532818653Disabling writes for close at 1733532818653Writing region close event to WAL at 1733532818653Closed at 1733532818653 2024-12-07T00:53:38,655 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:53:38,656 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733532818656"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733532818656"}]},"ts":"1733532818656"} 2024-12-07T00:53:38,659 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-07T00:53:38,661 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:53:38,661 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733532818661"}]},"ts":"1733532818661"} 2024-12-07T00:53:38,664 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-07T00:53:38,664 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {3e92e60d7d96=0} racks are {/default-rack=0} 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-07T00:53:38,665 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-07T00:53:38,665 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-07T00:53:38,665 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-07T00:53:38,665 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:53:38,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a80cb3034cf8a805dcd59208be4b7944, ASSIGN}] 2024-12-07T00:53:38,668 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a80cb3034cf8a805dcd59208be4b7944, ASSIGN 2024-12-07T00:53:38,670 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a80cb3034cf8a805dcd59208be4b7944, ASSIGN; state=OFFLINE, location=3e92e60d7d96,46203,1733532817264; forceNewPlan=false, retain=false 2024-12-07T00:53:38,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:38,820 INFO [3e92e60d7d96:35309 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T00:53:38,821 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a80cb3034cf8a805dcd59208be4b7944, regionState=OPENING, regionLocation=3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:38,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a80cb3034cf8a805dcd59208be4b7944, ASSIGN because future has completed 2024-12-07T00:53:38,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a80cb3034cf8a805dcd59208be4b7944, server=3e92e60d7d96,46203,1733532817264}] 2024-12-07T00:53:38,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:38,986 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:53:38,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:53:38,995 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 
2024-12-07T00:53:38,996 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a80cb3034cf8a805dcd59208be4b7944, NAME => 'TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:53:38,997 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:38,997 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:53:38,997 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:38,997 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:38,999 INFO [StoreOpener-a80cb3034cf8a805dcd59208be4b7944-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,002 INFO [StoreOpener-a80cb3034cf8a805dcd59208be4b7944-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a80cb3034cf8a805dcd59208be4b7944 columnFamilyName cf 2024-12-07T00:53:39,002 DEBUG [StoreOpener-a80cb3034cf8a805dcd59208be4b7944-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:53:39,003 INFO [StoreOpener-a80cb3034cf8a805dcd59208be4b7944-1 {}] regionserver.HStore(327): Store=a80cb3034cf8a805dcd59208be4b7944/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:53:39,003 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,005 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,005 DEBUG 
[RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,006 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,006 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,008 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,011 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:53:39,011 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a80cb3034cf8a805dcd59208be4b7944; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64566007, jitterRate=-0.03789152204990387}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:53:39,012 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a80cb3034cf8a805dcd59208be4b7944 2024-12-07T00:53:39,012 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a80cb3034cf8a805dcd59208be4b7944: Running coprocessor pre-open hook at 1733532818997Writing region info on filesystem at 1733532818997Initializing all the Stores at 1733532818999 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733532818999Cleaning up temporary data from old regions at 1733532819006 (+7 ms)Running coprocessor post-open hooks at 1733532819012 (+6 ms)Region opened successfully at 1733532819012 2024-12-07T00:53:39,014 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944., pid=6, masterSystemTime=1733532818986 2024-12-07T00:53:39,017 DEBUG [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:39,017 INFO [RS_OPEN_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 
2024-12-07T00:53:39,018 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a80cb3034cf8a805dcd59208be4b7944, regionState=OPEN, openSeqNum=2, regionLocation=3e92e60d7d96,46203,1733532817264 2024-12-07T00:53:39,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a80cb3034cf8a805dcd59208be4b7944, server=3e92e60d7d96,46203,1733532817264 because future has completed 2024-12-07T00:53:39,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T00:53:39,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a80cb3034cf8a805dcd59208be4b7944, server=3e92e60d7d96,46203,1733532817264 in 193 msec 2024-12-07T00:53:39,031 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T00:53:39,031 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=a80cb3034cf8a805dcd59208be4b7944, ASSIGN in 362 msec 2024-12-07T00:53:39,033 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:53:39,033 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733532819033"}]},"ts":"1733532819033"} 2024-12-07T00:53:39,036 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-07T00:53:39,037 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:53:39,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 419 msec 2024-12-07T00:53:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T00:53:39,255 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T00:53:39,256 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-07T00:53:39,256 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:53:39,260 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T00:53:39,261 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T00:53:39,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 
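The entries above trace CreateTableProcedure pid=4 for 'TestHBaseWalOnEC' (REGION_REPLICATION => '1', a single 'cf' family with VERSIONS => '1') through WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS and POST_OPERATION, finishing in 419 msec. A hedged sketch of the client call that triggers such a procedure, using the public TableDescriptorBuilder/Admin API; the class name and connection setup are illustrative and not copied from the test source.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Creates a table equivalent to the one the master is shown building above.
    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)                         // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                           // VERSIONS => '1'
                  .build())
              .build();
          admin.createTable(td);                               // master runs CreateTableProcedure (pid=4 above)
        }
      }
    }
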
2024-12-07T00:53:39,264 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T00:53:39,264 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:53:39,264 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T00:53:39,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-07T00:53:39,264 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T00:53:39,264 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T00:53:39,266 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC 2024-12-07T00:53:39,266 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestHBaseWalOnEC Metrics about Tables on a single HBase RegionServer 2024-12-07T00:53:39,269 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944., hostname=3e92e60d7d96,46203,1733532817264, seqNum=2] 2024-12-07T00:53:39,269 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:53:39,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:53:39,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-07T00:53:39,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-07T00:53:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T00:53:39,278 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-07T00:53:39,279 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T00:53:39,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T00:53:39,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T00:53:39,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46203 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T00:53:39,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:39,436 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing a80cb3034cf8a805dcd59208be4b7944 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-07T00:53:39,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/.tmp/cf/87dd2d87b1bf4ac782bf681abf3ef35a is 36, key is row/cf:cq/1733532819272/Put/seqid=0 2024-12-07T00:53:39,459 WARN [IPC Server handler 1 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:53:39,460 WARN [IPC Server handler 1 on default port 39499 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:53:39,460 WARN [IPC Server handler 1 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:53:39,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741839_1015 (size=4787) 2024-12-07T00:53:39,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741839_1015 (size=4787) 2024-12-07T00:53:39,465 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), 
to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/.tmp/cf/87dd2d87b1bf4ac782bf681abf3ef35a 2024-12-07T00:53:39,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/.tmp/cf/87dd2d87b1bf4ac782bf681abf3ef35a as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/cf/87dd2d87b1bf4ac782bf681abf3ef35a 2024-12-07T00:53:39,483 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/cf/87dd2d87b1bf4ac782bf681abf3ef35a, entries=1, sequenceid=5, filesize=4.7 K 2024-12-07T00:53:39,484 INFO [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for a80cb3034cf8a805dcd59208be4b7944 in 48ms, sequenceid=5, compaction requested=false 2024-12-07T00:53:39,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for a80cb3034cf8a805dcd59208be4b7944: 2024-12-07T00:53:39,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:39,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3e92e60d7d96:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T00:53:39,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T00:53:39,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T00:53:39,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-12-07T00:53:39,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 217 msec 2024-12-07T00:53:39,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T00:53:39,592 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-07T00:53:39,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T00:53:39,596 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
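The flush entries above (FlushTableProcedure pid=7 dispatching FlushRegionProcedure pid=8) show a single 32 B cell, row 'row' in column 'cf:cq', being written out to an ~4.7 K HFile. A hedged sketch of the client operations that would put that cell and request the flush; the cell value and class name are illustrative, the real test code lives in TestHBaseWalOnEC.java.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          // Matches the cell seen in the log: "key is row/cf:cq/.../Put"; the value is illustrative.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Asks the master to flush the table; the master runs FlushTableProcedure and
          // dispatches FlushRegionProcedure to the hosting region server (pid=7/pid=8 above).
          admin.flush(tn);
        }
      }
    }
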
2024-12-07T00:53:39,596 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:39,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
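The call stack above originates in TestHBaseWalOnEC.tearDown() invoking HBaseTestingUtil.shutdownMiniCluster(), which closes the shared async connection and then shuts the mini cluster down. A minimal sketch of the JUnit 4 lifecycle that produces this shutdown sequence, hedged: it assumes HBaseTestingUtil keeps the startMiniCluster(int)/shutdownMiniCluster() convenience methods of the older HBaseTestingUtility, and the class and method names are illustrative rather than copied from the test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycle {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // The log shows three region servers reporting in ("expected min=3 server(s)").
        UTIL.startMiniCluster(3);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Closes the shared connection, asks the master to shut the cluster down,
        // then stops the region servers - the sequence logged from here onwards.
        UTIL.shutdownMiniCluster();
      }
    }
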
2024-12-07T00:53:39,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:39,596 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T00:53:39,596 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T00:53:39,597 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1026244102, stopped=false 2024-12-07T00:53:39,597 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3e92e60d7d96,35309,1733532816984 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:39,663 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:39,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:39,663 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T00:53:39,664 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:39,664 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:39,664 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:39,664 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:39,664 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:39,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e92e60d7d96,39091,1733532817178' ***** 2024-12-07T00:53:39,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:53:39,665 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T00:53:39,665 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e92e60d7d96,36425,1733532817221' ***** 2024-12-07T00:53:39,665 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:53:39,665 INFO [RS:0;3e92e60d7d96:39091 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:53:39,665 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T00:53:39,665 INFO [RS:0;3e92e60d7d96:39091 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T00:53:39,666 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(959): stopping server 3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:39,666 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T00:53:39,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T00:53:39,666 INFO [RS:0;3e92e60d7d96:39091 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3e92e60d7d96:39091. 
2024-12-07T00:53:39,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3e92e60d7d96,46203,1733532817264' ***** 2024-12-07T00:53:39,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T00:53:39,666 DEBUG [RS:0;3e92e60d7d96:39091 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T00:53:39,666 DEBUG [RS:0;3e92e60d7d96:39091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:39,666 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:53:39,666 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:53:39,666 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(976): stopping server 3e92e60d7d96,39091,1733532817178; all regions closed. 2024-12-07T00:53:39,666 INFO [RS:1;3e92e60d7d96:36425 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:53:39,666 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T00:53:39,667 INFO [RS:1;3e92e60d7d96:36425 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T00:53:39,667 INFO [RS:2;3e92e60d7d96:46203 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:53:39,667 INFO [RS:2;3e92e60d7d96:46203 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-07T00:53:39,667 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-07T00:53:39,667 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(3091): Received CLOSE for a80cb3034cf8a805dcd59208be4b7944
2024-12-07T00:53:39,667 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(959): stopping server 3e92e60d7d96,36425,1733532817221
2024-12-07T00:53:39,667 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-07T00:53:39,667 INFO [RS:1;3e92e60d7d96:36425 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3e92e60d7d96:36425.
2024-12-07T00:53:39,667 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(959): stopping server 3e92e60d7d96,46203,1733532817264
2024-12-07T00:53:39,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T00:53:39,667 DEBUG [RS:1;3e92e60d7d96:36425 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-07T00:53:39,668 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-07T00:53:39,668 DEBUG [RS:1;3e92e60d7d96:36425 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-07T00:53:39,668 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T00:53:39,668 INFO [RS:2;3e92e60d7d96:46203 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;3e92e60d7d96:46203.
2024-12-07T00:53:39,668 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-07T00:53:39,668 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T00:53:39,668 DEBUG [RS:2;3e92e60d7d96:46203 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-07T00:53:39,668 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a80cb3034cf8a805dcd59208be4b7944, disabling compactions & flushes
2024-12-07T00:53:39,668 DEBUG [RS:2;3e92e60d7d96:46203 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-07T00:53:39,668 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-07T00:53:39,668 INFO [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.
2024-12-07T00:53:39,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T00:53:39,668 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-07T00:53:39,668 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.
2024-12-07T00:53:39,668 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-07T00:53:39,668 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1325): Online Regions={a80cb3034cf8a805dcd59208be4b7944=TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.}
2024-12-07T00:53:39,668 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-07T00:53:39,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T00:53:39,669 DEBUG [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1351): Waiting on a80cb3034cf8a805dcd59208be4b7944
2024-12-07T00:53:39,669 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944.
after waiting 0 ms 2024-12-07T00:53:39,669 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:39,669 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T00:53:39,669 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T00:53:39,669 DEBUG [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T00:53:39,669 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T00:53:39,669 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T00:53:39,670 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T00:53:39,670 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T00:53:39,670 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T00:53:39,670 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-07T00:53:39,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741834_1010 (size=93) 2024-12-07T00:53:39,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741834_1010 (size=93) 2024-12-07T00:53:39,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741834_1010 (size=93) 2024-12-07T00:53:39,676 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/default/TestHBaseWalOnEC/a80cb3034cf8a805dcd59208be4b7944/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T00:53:39,676 DEBUG [RS:0;3e92e60d7d96:39091 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e92e60d7d96%2C39091%2C1733532817178:(num 1733532817998) 2024-12-07T00:53:39,677 DEBUG [RS:0;3e92e60d7d96:39091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.ChoreService(370): Chore service for: regionserver/3e92e60d7d96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T00:53:39,677 INFO [regionserver/3e92e60d7d96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:53:39,677 INFO [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:39,677 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a80cb3034cf8a805dcd59208be4b7944: Waiting for close lock at 1733532819668Running coprocessor pre-close hooks at 1733532819668Disabling compacts and flushes for region at 1733532819668Disabling writes for close at 1733532819669 (+1 ms)Writing region close event to WAL at 1733532819671 (+2 ms)Running coprocessor post-close hooks at 1733532819677 (+6 ms)Closed at 1733532819677 2024-12-07T00:53:39,677 INFO [RS:0;3e92e60d7d96:39091 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39091 2024-12-07T00:53:39,678 DEBUG [RS_CLOSE_REGION-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944. 
2024-12-07T00:53:39,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:53:39,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e92e60d7d96,39091,1733532817178 2024-12-07T00:53:39,683 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T00:53:39,691 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/info/ce2af4219d38470d84b1b730d859fac1 is 153, key is TestHBaseWalOnEC,,1733532818618.a80cb3034cf8a805dcd59208be4b7944./info:regioninfo/1733532819018/Put/seqid=0 2024-12-07T00:53:39,693 WARN [IPC Server handler 3 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:53:39,693 WARN [IPC Server handler 3 on default port 39499 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:53:39,693 WARN [IPC Server handler 3 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:53:39,694 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e92e60d7d96,39091,1733532817178] 2024-12-07T00:53:39,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741840_1016 (size=6637) 2024-12-07T00:53:39,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741840_1016 (size=6637) 2024-12-07T00:53:39,698 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/info/ce2af4219d38470d84b1b730d859fac1 2024-12-07T00:53:39,704 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e92e60d7d96,39091,1733532817178 
already deleted, retry=false 2024-12-07T00:53:39,704 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e92e60d7d96,39091,1733532817178 expired; onlineServers=2 2024-12-07T00:53:39,721 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/ns/8bb47965365f412692839df48ec660b2 is 43, key is default/ns:d/1733532818531/Put/seqid=0 2024-12-07T00:53:39,722 WARN [IPC Server handler 0 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:53:39,722 WARN [IPC Server handler 0 on default port 39499 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:53:39,722 WARN [IPC Server handler 0 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:53:39,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741841_1017 (size=5153) 2024-12-07T00:53:39,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741841_1017 (size=5153) 2024-12-07T00:53:39,728 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/ns/8bb47965365f412692839df48ec660b2 2024-12-07T00:53:39,750 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/table/8ed73e4beb954ed8ad6114137ed8ebde is 52, key is TestHBaseWalOnEC/table:state/1733532819033/Put/seqid=0 2024-12-07T00:53:39,751 WARN [IPC Server handler 2 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and 
org.apache.hadoop.net.NetworkTopology 2024-12-07T00:53:39,751 WARN [IPC Server handler 2 on default port 39499 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:53:39,751 WARN [IPC Server handler 2 on default port 39499 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:53:39,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741842_1018 (size=5249) 2024-12-07T00:53:39,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741842_1018 (size=5249) 2024-12-07T00:53:39,757 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/table/8ed73e4beb954ed8ad6114137ed8ebde 2024-12-07T00:53:39,762 INFO [regionserver/3e92e60d7d96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:39,762 INFO [regionserver/3e92e60d7d96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:39,764 INFO [regionserver/3e92e60d7d96:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:39,767 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/info/ce2af4219d38470d84b1b730d859fac1 as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/info/ce2af4219d38470d84b1b730d859fac1 2024-12-07T00:53:39,775 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/info/ce2af4219d38470d84b1b730d859fac1, entries=10, sequenceid=11, filesize=6.5 K 2024-12-07T00:53:39,801 INFO [RS:0;3e92e60d7d96:39091 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:39,801 INFO [RS:0;3e92e60d7d96:39091 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e92e60d7d96,39091,1733532817178; zookeeper connection closed. 
2024-12-07T00:53:39,801 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@173ae54 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@173ae54 2024-12-07T00:53:39,801 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/ns/8bb47965365f412692839df48ec660b2 as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/ns/8bb47965365f412692839df48ec660b2 2024-12-07T00:53:39,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:39,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39091-0x101ad4791eb0001, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:39,810 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/ns/8bb47965365f412692839df48ec660b2, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T00:53:39,811 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/.tmp/table/8ed73e4beb954ed8ad6114137ed8ebde as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/table/8ed73e4beb954ed8ad6114137ed8ebde 2024-12-07T00:53:39,820 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/table/8ed73e4beb954ed8ad6114137ed8ebde, entries=2, sequenceid=11, filesize=5.1 K 2024-12-07T00:53:39,821 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false 2024-12-07T00:53:39,828 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T00:53:39,828 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T00:53:39,828 INFO [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T00:53:39,829 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733532819669Running coprocessor pre-close hooks at 1733532819669Disabling compacts and flushes for region 
at 1733532819669Disabling writes for close at 1733532819670 (+1 ms)Obtaining lock to block concurrent updates at 1733532819670Preparing flush snapshotting stores in 1588230740 at 1733532819670Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733532819671 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733532819672 (+1 ms)Flushing 1588230740/info: creating writer at 1733532819672Flushing 1588230740/info: appending metadata at 1733532819691 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733532819691Flushing 1588230740/ns: creating writer at 1733532819706 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733532819720 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733532819720Flushing 1588230740/table: creating writer at 1733532819735 (+15 ms)Flushing 1588230740/table: appending metadata at 1733532819749 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733532819750 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17b0b4b1: reopening flushed file at 1733532819765 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21af01df: reopening flushed file at 1733532819776 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55afd3e4: reopening flushed file at 1733532819810 (+34 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 151ms, sequenceid=11, compaction requested=false at 1733532819821 (+11 ms)Writing region close event to WAL at 1733532819823 (+2 ms)Running coprocessor post-close hooks at 1733532819828 (+5 ms)Closed at 1733532819828 2024-12-07T00:53:39,829 DEBUG [RS_CLOSE_META-regionserver/3e92e60d7d96:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T00:53:39,864 INFO [regionserver/3e92e60d7d96:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T00:53:39,864 INFO [regionserver/3e92e60d7d96:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T00:53:39,864 INFO [regionserver/3e92e60d7d96:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T00:53:39,864 INFO [regionserver/3e92e60d7d96:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T00:53:39,869 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(976): stopping server 3e92e60d7d96,46203,1733532817264; all regions closed. 2024-12-07T00:53:39,869 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(976): stopping server 3e92e60d7d96,36425,1733532817221; all regions closed. 
2024-12-07T00:53:39,869 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,869 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,870 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741836_1012 (size=2751) 2024-12-07T00:53:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741833_1009 (size=1298) 2024-12-07T00:53:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741833_1009 (size=1298) 2024-12-07T00:53:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741836_1012 (size=2751) 2024-12-07T00:53:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741833_1009 (size=1298) 2024-12-07T00:53:39,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741836_1012 (size=2751) 2024-12-07T00:53:39,876 DEBUG [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs 2024-12-07T00:53:39,876 DEBUG [RS:2;3e92e60d7d96:46203 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs 2024-12-07T00:53:39,876 INFO [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e92e60d7d96%2C36425%2C1733532817221.meta:.meta(num 1733532818465) 2024-12-07T00:53:39,876 INFO [RS:2;3e92e60d7d96:46203 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e92e60d7d96%2C46203%2C1733532817264:(num 1733532817998) 2024-12-07T00:53:39,876 DEBUG [RS:2;3e92e60d7d96:46203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:39,876 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:39,876 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:39,876 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.ChoreService(370): Chore service for: regionserver/3e92e60d7d96:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:39,876 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,876 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-07T00:53:39,876 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,876 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:53:39,877 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:53:39,877 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:39,877 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,877 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,877 INFO [RS:2;3e92e60d7d96:46203 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46203 2024-12-07T00:53:39,877 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:39,877 INFO [regionserver/3e92e60d7d96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T00:53:39,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741835_1011 (size=93) 2024-12-07T00:53:39,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741835_1011 (size=93) 2024-12-07T00:53:39,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741835_1011 (size=93) 2024-12-07T00:53:39,882 DEBUG [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/oldWALs 2024-12-07T00:53:39,882 INFO [RS:1;3e92e60d7d96:36425 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3e92e60d7d96%2C36425%2C1733532817221:(num 1733532817998) 2024-12-07T00:53:39,882 DEBUG [RS:1;3e92e60d7d96:36425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:53:39,882 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:53:39,882 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T00:53:39,883 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.ChoreService(370): Chore service for: regionserver/3e92e60d7d96:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T00:53:39,883 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T00:53:39,883 INFO [regionserver/3e92e60d7d96:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T00:53:39,883 INFO [RS:1;3e92e60d7d96:36425 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36425
2024-12-07T00:53:39,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-07T00:53:39,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e92e60d7d96,46203,1733532817264
2024-12-07T00:53:39,884 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-07T00:53:39,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3e92e60d7d96,36425,1733532817221
2024-12-07T00:53:39,894 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-07T00:53:39,894 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e92e60d7d96,46203,1733532817264]
2024-12-07T00:53:39,894 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f9cc88f5cc8@24362f8c rejected from java.util.concurrent.ThreadPoolExecutor@6134a56e[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-12-07T00:53:39,915 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e92e60d7d96,46203,1733532817264 already deleted, retry=false
2024-12-07T00:53:39,915 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e92e60d7d96,46203,1733532817264 expired; onlineServers=1
2024-12-07T00:53:39,915 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3e92e60d7d96,36425,1733532817221]
2024-12-07T00:53:39,925 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3e92e60d7d96,36425,1733532817221 already deleted, retry=false
2024-12-07T00:53:39,926 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3e92e60d7d96,36425,1733532817221 expired; onlineServers=0
2024-12-07T00:53:39,926 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3e92e60d7d96,35309,1733532816984' *****
2024-12-07T00:53:39,926 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-07T00:53:39,926 INFO [M:0;3e92e60d7d96:35309 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-07T00:53:39,926 INFO [M:0;3e92e60d7d96:35309 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-07T00:53:39,926 DEBUG [M:0;3e92e60d7d96:35309 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-07T00:53:39,926 DEBUG [M:0;3e92e60d7d96:35309 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-07T00:53:39,926 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.small.0-1733532817720 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.small.0-1733532817720,5,FailOnTimeoutGroup]
2024-12-07T00:53:39,926 DEBUG [master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.large.0-1733532817720 {}] cleaner.HFileCleaner(306): Exit Thread[master/3e92e60d7d96:0:becomeActiveMaster-HFileCleaner.large.0-1733532817720,5,FailOnTimeoutGroup]
2024-12-07T00:53:39,926 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-07T00:53:39,926 INFO [M:0;3e92e60d7d96:35309 {}] hbase.ChoreService(370): Chore service for: master/3e92e60d7d96:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-07T00:53:39,927 INFO [M:0;3e92e60d7d96:35309 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-07T00:53:39,927 DEBUG [M:0;3e92e60d7d96:35309 {}] master.HMaster(1795): Stopping service threads
2024-12-07T00:53:39,927 INFO [M:0;3e92e60d7d96:35309 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-07T00:53:39,927 INFO [M:0;3e92e60d7d96:35309 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-07T00:53:39,927 INFO [M:0;3e92e60d7d96:35309 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-07T00:53:39,927 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-07T00:53:39,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T00:53:39,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:53:39,936 DEBUG [M:0;3e92e60d7d96:35309 {}] zookeeper.ZKUtil(347): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T00:53:39,936 WARN [M:0;3e92e60d7d96:35309 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T00:53:39,937 INFO [M:0;3e92e60d7d96:35309 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/.lastflushedseqids 2024-12-07T00:53:39,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741843_1019 (size=127) 2024-12-07T00:53:39,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741843_1019 (size=127) 2024-12-07T00:53:39,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741843_1019 (size=127) 2024-12-07T00:53:39,945 INFO [M:0;3e92e60d7d96:35309 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T00:53:39,946 INFO [M:0;3e92e60d7d96:35309 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T00:53:39,946 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T00:53:39,946 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:39,946 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:39,946 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T00:53:39,946 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T00:53:39,946 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-07T00:53:39,962 DEBUG [M:0;3e92e60d7d96:35309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3dcd5a46e2d54390be665d86aed1e4a3 is 82, key is hbase:meta,,1/info:regioninfo/1733532818508/Put/seqid=0 2024-12-07T00:53:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741844_1020 (size=5672) 2024-12-07T00:53:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741844_1020 (size=5672) 2024-12-07T00:53:39,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741844_1020 (size=5672) 2024-12-07T00:53:39,970 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3dcd5a46e2d54390be665d86aed1e4a3 2024-12-07T00:53:39,991 DEBUG [M:0;3e92e60d7d96:35309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/911c7e5381974b3cb6b32c9796ad7d89 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733532819039/Put/seqid=0 2024-12-07T00:53:39,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741845_1021 (size=6438) 2024-12-07T00:53:39,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741845_1021 (size=6438) 2024-12-07T00:53:39,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741845_1021 (size=6438) 2024-12-07T00:53:39,999 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/911c7e5381974b3cb6b32c9796ad7d89 2024-12-07T00:53:40,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:40,005 INFO [RS:2;3e92e60d7d96:46203 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:40,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46203-0x101ad4791eb0003, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:40,005 INFO [RS:2;3e92e60d7d96:46203 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e92e60d7d96,46203,1733532817264; zookeeper connection closed. 
2024-12-07T00:53:40,005 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@408657ce {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@408657ce 2024-12-07T00:53:40,015 INFO [RS:1;3e92e60d7d96:36425 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:40,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:40,015 INFO [RS:1;3e92e60d7d96:36425 {}] regionserver.HRegionServer(1031): Exiting; stopping=3e92e60d7d96,36425,1733532817221; zookeeper connection closed. 2024-12-07T00:53:40,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36425-0x101ad4791eb0002, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:40,016 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5f0fc3a4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5f0fc3a4 2024-12-07T00:53:40,016 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T00:53:40,026 DEBUG [M:0;3e92e60d7d96:35309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5e4aa95045544c6c9f2773c586eb5da3 is 69, key is 3e92e60d7d96,36425,1733532817221/rs:state/1733532817761/Put/seqid=0 2024-12-07T00:53:40,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741846_1022 (size=5294) 2024-12-07T00:53:40,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741846_1022 (size=5294) 2024-12-07T00:53:40,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741846_1022 (size=5294) 2024-12-07T00:53:40,034 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5e4aa95045544c6c9f2773c586eb5da3 2024-12-07T00:53:40,041 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3dcd5a46e2d54390be665d86aed1e4a3 as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3dcd5a46e2d54390be665d86aed1e4a3 2024-12-07T00:53:40,049 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3dcd5a46e2d54390be665d86aed1e4a3, entries=8, sequenceid=72, filesize=5.5 K 2024-12-07T00:53:40,050 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/911c7e5381974b3cb6b32c9796ad7d89 as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/911c7e5381974b3cb6b32c9796ad7d89 2024-12-07T00:53:40,057 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/911c7e5381974b3cb6b32c9796ad7d89, entries=8, sequenceid=72, filesize=6.3 K 2024-12-07T00:53:40,058 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5e4aa95045544c6c9f2773c586eb5da3 as hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5e4aa95045544c6c9f2773c586eb5da3 2024-12-07T00:53:40,066 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39499/user/jenkins/test-data/30dc3d61-a153-9ca3-1ad3-4253a7e6dba6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5e4aa95045544c6c9f2773c586eb5da3, entries=3, sequenceid=72, filesize=5.2 K 2024-12-07T00:53:40,067 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false 2024-12-07T00:53:40,069 INFO [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:53:40,069 DEBUG [M:0;3e92e60d7d96:35309 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733532819946Disabling compacts and flushes for region at 1733532819946Disabling writes for close at 1733532819946Obtaining lock to block concurrent updates at 1733532819946Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733532819946Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27462, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733532819947 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733532819947Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733532819948 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733532819961 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733532819961Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733532819976 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733532819990 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733532819991 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733532820007 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733532820025 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733532820025Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c77b6cc: reopening flushed file at 1733532820040 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54b0748: reopening flushed file at 1733532820049 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a97fe21: reopening flushed file at 1733532820057 (+8 ms)Finished flush of dataSize ~26.82 KB/27462, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=72, compaction requested=false at 1733532820068 (+11 ms)Writing region close event to WAL at 1733532820069 (+1 ms)Closed at 1733532820069 2024-12-07T00:53:40,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:40,070 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:40,070 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:40,070 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:40,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T00:53:40,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37499 is added to blk_1073741830_1006 (size=32665) 2024-12-07T00:53:40,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39827 is added to blk_1073741830_1006 (size=32665) 2024-12-07T00:53:40,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41121 is added to blk_1073741830_1006 (size=32665) 2024-12-07T00:53:40,074 INFO [M:0;3e92e60d7d96:35309 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T00:53:40,074 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T00:53:40,074 INFO [M:0;3e92e60d7d96:35309 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35309 2024-12-07T00:53:40,074 INFO [M:0;3e92e60d7d96:35309 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T00:53:40,184 INFO [M:0;3e92e60d7d96:35309 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T00:53:40,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:40,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35309-0x101ad4791eb0000, quorum=127.0.0.1:53037, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:53:40,186 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e89cb0b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:53:40,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b3c8c82{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:53:40,187 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:53:40,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c77de1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T00:53:40,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73f6422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,STOPPED} 2024-12-07T00:53:40,188 WARN [BP-871738338-172.17.0.2-1733532814410 heartbeating to localhost/127.0.0.1:39499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T00:53:40,188 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T00:53:40,188 WARN [BP-871738338-172.17.0.2-1733532814410 heartbeating to localhost/127.0.0.1:39499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871738338-172.17.0.2-1733532814410 (Datanode Uuid f1fefc16-19c2-40a3-9dba-eb9db4a16d4a) service to localhost/127.0.0.1:39499
2024-12-07T00:53:40,188 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T00:53:40,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data5/current/BP-871738338-172.17.0.2-1733532814410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:53:40,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data6/current/BP-871738338-172.17.0.2-1733532814410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:53:40,189 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T00:53:40,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f8d2ee2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T00:53:40,192 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6beabb01{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:53:40,192 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:53:40,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e5afbc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:53:40,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c597470{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,STOPPED}
2024-12-07T00:53:40,194 WARN [BP-871738338-172.17.0.2-1733532814410 heartbeating to localhost/127.0.0.1:39499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T00:53:40,194 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T00:53:40,194 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T00:53:40,194 WARN [BP-871738338-172.17.0.2-1733532814410 heartbeating to localhost/127.0.0.1:39499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871738338-172.17.0.2-1733532814410 (Datanode Uuid 2290421d-9a84-465e-986a-079573315aa1) service to localhost/127.0.0.1:39499
2024-12-07T00:53:40,194 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data3/current/BP-871738338-172.17.0.2-1733532814410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:53:40,195 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data4/current/BP-871738338-172.17.0.2-1733532814410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:53:40,195 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T00:53:40,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@700f39d7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T00:53:40,202 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e9ae4fc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:53:40,202 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:53:40,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61d23bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:53:40,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@137179d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,STOPPED}
2024-12-07T00:53:40,203 WARN [BP-871738338-172.17.0.2-1733532814410 heartbeating to localhost/127.0.0.1:39499 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T00:53:40,203 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T00:53:40,204 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T00:53:40,204 WARN [BP-871738338-172.17.0.2-1733532814410 heartbeating to localhost/127.0.0.1:39499 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-871738338-172.17.0.2-1733532814410 (Datanode Uuid efb9f2e9-030f-4934-beb2-301326d5da03) service to localhost/127.0.0.1:39499
2024-12-07T00:53:40,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data1/current/BP-871738338-172.17.0.2-1733532814410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:53:40,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/cluster_13e1d1bc-d17a-1336-b141-d6bfc5cc525b/data/data2/current/BP-871738338-172.17.0.2-1733532814410 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:53:40,204 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T00:53:40,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ffa125c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T00:53:40,210 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3aa18531{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:53:40,210 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:53:40,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16eaa68d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:53:40,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f854cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ac43e818-c7c4-334b-4622-c91c43b2bba5/hadoop.log.dir/,STOPPED}
2024-12-07T00:53:40,219 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T00:53:40,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T00:53:40,247 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=149 (was 88) - Thread LEAK? -, OpenFileDescriptor=518 (was 441) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 225), ProcessCount=11 (was 11), AvailableMemoryMB=7555 (was 7793)