2024-12-09 05:47:41,866 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-09 05:47:41,877 main DEBUG Took 0.009127 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-09 05:47:41,877 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-09 05:47:41,878 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-09 05:47:41,878 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-09 05:47:41,880 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,888 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-09 05:47:41,899 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,900 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,901 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,901 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,903 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,903 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,904 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,905 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,906 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,906 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,906 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,907 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,907 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,907 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,907 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,908 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,908 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,908 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 05:47:41,909 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,909 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-09 05:47:41,910 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 05:47:41,912 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-09 05:47:41,920 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-09 05:47:41,921 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-09 05:47:41,923 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-09 05:47:41,923 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-09 05:47:41,934 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-09 05:47:41,938 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-09 05:47:41,941 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-09 05:47:41,941 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-09 05:47:41,942 main DEBUG createAppenders(={Console})
2024-12-09 05:47:41,943 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-09 05:47:41,943 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-09 05:47:41,944 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-09 05:47:41,945 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-09 05:47:41,945 main DEBUG OutputStream closed
2024-12-09 05:47:41,945 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-09 05:47:41,946 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-09 05:47:41,946 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-09 05:47:42,023 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-09 05:47:42,025 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-09 05:47:42,026 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-09 05:47:42,027 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-09 05:47:42,028 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-09 05:47:42,028 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-09 05:47:42,029 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-09 05:47:42,029 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-09 05:47:42,029 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-09 05:47:42,030 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-09 05:47:42,030 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-09 05:47:42,030 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-09 05:47:42,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-09 05:47:42,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-09 05:47:42,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-09 05:47:42,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-09 05:47:42,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-09 05:47:42,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-09 05:47:42,035 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09 05:47:42,036 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-09 05:47:42,036 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-09 05:47:42,037 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-09T05:47:42,058 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-09 05:47:42,062 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-09 05:47:42,063 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09T05:47:42,307 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3
2024-12-09T05:47:42,329 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed, deleteOnExit=true
2024-12-09T05:47:42,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/test.cache.data in system properties and HBase conf
2024-12-09T05:47:42,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T05:47:42,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir in system properties and HBase conf
2024-12-09T05:47:42,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T05:47:42,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T05:47:42,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T05:47:42,414 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-09T05:47:42,504 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T05:47:42,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T05:47:42,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T05:47:42,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T05:47:42,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T05:47:42,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T05:47:42,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T05:47:42,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T05:47:42,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T05:47:42,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T05:47:42,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/nfs.dump.dir in system properties and HBase conf
2024-12-09T05:47:42,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/java.io.tmpdir in system properties and HBase conf
2024-12-09T05:47:42,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T05:47:42,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T05:47:42,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T05:47:43,172 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-09T05:47:43,241 INFO [Time-limited test {}] log.Log(170): Logging initialized @1935ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-09T05:47:43,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T05:47:43,365 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T05:47:43,382 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T05:47:43,383 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T05:47:43,384 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T05:47:43,396 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T05:47:43,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,AVAILABLE}
2024-12-09T05:47:43,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T05:47:43,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/java.io.tmpdir/jetty-localhost-34303-hadoop-hdfs-3_4_1-tests_jar-_-any-8969895456443835659/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T05:47:43,571 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:34303}
2024-12-09T05:47:43,571 INFO [Time-limited test {}] server.Server(415): Started @2266ms
2024-12-09T05:47:43,872 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T05:47:43,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T05:47:43,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T05:47:43,879 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T05:47:43,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T05:47:43,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,AVAILABLE}
2024-12-09T05:47:43,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T05:47:43,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/java.io.tmpdir/jetty-localhost-36457-hadoop-hdfs-3_4_1-tests_jar-_-any-10318703746215489590/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T05:47:43,975 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:36457}
2024-12-09T05:47:43,975 INFO [Time-limited test {}] server.Server(415): Started @2670ms
2024-12-09T05:47:44,021 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T05:47:44,118 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T05:47:44,122 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T05:47:44,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T05:47:44,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T05:47:44,124 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T05:47:44,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,AVAILABLE}
2024-12-09T05:47:44,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T05:47:44,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/java.io.tmpdir/jetty-localhost-36135-hadoop-hdfs-3_4_1-tests_jar-_-any-17404829804199257505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T05:47:44,220 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:36135}
2024-12-09T05:47:44,221 INFO [Time-limited test {}] server.Server(415): Started @2916ms
2024-12-09T05:47:44,223 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T05:47:44,258 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T05:47:44,264 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T05:47:44,265 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T05:47:44,265 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T05:47:44,266 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T05:47:44,266 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,AVAILABLE}
2024-12-09T05:47:44,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T05:47:44,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/java.io.tmpdir/jetty-localhost-42409-hadoop-hdfs-3_4_1-tests_jar-_-any-1722897341495758528/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T05:47:44,370 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:42409}
2024-12-09T05:47:44,370 INFO [Time-limited test {}] server.Server(415): Started @3065ms
2024-12-09T05:47:44,375 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T05:47:44,379 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data4/current/BP-1006126246-172.17.0.2-1733723262988/current, will proceed with Du for space computation calculation,
2024-12-09T05:47:44,379 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data2/current/BP-1006126246-172.17.0.2-1733723262988/current, will proceed with Du for space computation calculation,
2024-12-09T05:47:44,379 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data3/current/BP-1006126246-172.17.0.2-1733723262988/current, will proceed with Du for space computation calculation,
2024-12-09T05:47:44,379 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data1/current/BP-1006126246-172.17.0.2-1733723262988/current, will proceed with Du for space computation calculation,
2024-12-09T05:47:44,421 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T05:47:44,421 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T05:47:44,462 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data5/current/BP-1006126246-172.17.0.2-1733723262988/current, will proceed with Du for space computation calculation,
2024-12-09T05:47:44,462 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data6/current/BP-1006126246-172.17.0.2-1733723262988/current, will proceed with Du for space computation calculation,
2024-12-09T05:47:44,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d1371dcfbc6886c with lease ID 0xf799d3f0f382f968: Processing first storage report for DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882 from datanode DatanodeRegistration(127.0.0.1:43547, datanodeUuid=b33d9565-76f7-4382-821e-f7b88b509044, infoPort=33465, infoSecurePort=0, ipcPort=40013, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988)
2024-12-09T05:47:44,495 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d1371dcfbc6886c with lease ID 0xf799d3f0f382f968: from storage DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882 node DatanodeRegistration(127.0.0.1:43547, datanodeUuid=b33d9565-76f7-4382-821e-f7b88b509044, infoPort=33465, infoSecurePort=0, ipcPort=40013, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T05:47:44,496 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcecb29f5befcd5ab with lease ID 0xf799d3f0f382f967: Processing first storage report for DS-7e92fe02-692d-4b13-a119-abdd27e295f8 from datanode DatanodeRegistration(127.0.0.1:42767, datanodeUuid=0012ce7d-5b78-402f-a5b7-4b40c54c9d6e, infoPort=36941, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988)
2024-12-09T05:47:44,496 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcecb29f5befcd5ab with lease ID 0xf799d3f0f382f967: from storage DS-7e92fe02-692d-4b13-a119-abdd27e295f8 node DatanodeRegistration(127.0.0.1:42767, datanodeUuid=0012ce7d-5b78-402f-a5b7-4b40c54c9d6e, infoPort=36941, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T05:47:44,496 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d1371dcfbc6886c with lease ID 0xf799d3f0f382f968: Processing first storage report for DS-4dc7d5da-45c9-4e7e-810f-655fd578667d from datanode DatanodeRegistration(127.0.0.1:43547, datanodeUuid=b33d9565-76f7-4382-821e-f7b88b509044, infoPort=33465, infoSecurePort=0, ipcPort=40013, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988)
2024-12-09T05:47:44,496 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d1371dcfbc6886c with lease ID 0xf799d3f0f382f968: from storage DS-4dc7d5da-45c9-4e7e-810f-655fd578667d node DatanodeRegistration(127.0.0.1:43547, datanodeUuid=b33d9565-76f7-4382-821e-f7b88b509044, infoPort=33465, infoSecurePort=0, ipcPort=40013, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T05:47:44,496 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcecb29f5befcd5ab with lease ID 0xf799d3f0f382f967: Processing first storage report for DS-41b43006-f49b-4829-9b36-70372ee31c10 from datanode DatanodeRegistration(127.0.0.1:42767, datanodeUuid=0012ce7d-5b78-402f-a5b7-4b40c54c9d6e, infoPort=36941, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988)
2024-12-09T05:47:44,496 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcecb29f5befcd5ab with lease ID 0xf799d3f0f382f967: from storage DS-41b43006-f49b-4829-9b36-70372ee31c10 node DatanodeRegistration(127.0.0.1:42767, datanodeUuid=0012ce7d-5b78-402f-a5b7-4b40c54c9d6e, infoPort=36941, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T05:47:44,498 WARN [Thread-119 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T05:47:44,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73a3dfca5b7dfa08 with lease ID 0xf799d3f0f382f969: Processing first storage report for DS-96ca065a-8ec9-4473-8f16-a302329ae6ef from datanode DatanodeRegistration(127.0.0.1:45353, datanodeUuid=72f28de5-dd1c-47d0-90a1-baeea8cd3857, infoPort=38145, infoSecurePort=0, ipcPort=33215, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988)
2024-12-09T05:47:44,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73a3dfca5b7dfa08 with lease ID 0xf799d3f0f382f969: from storage DS-96ca065a-8ec9-4473-8f16-a302329ae6ef node DatanodeRegistration(127.0.0.1:45353, datanodeUuid=72f28de5-dd1c-47d0-90a1-baeea8cd3857, infoPort=38145, infoSecurePort=0, ipcPort=33215, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T05:47:44,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x73a3dfca5b7dfa08 with lease ID 0xf799d3f0f382f969: Processing first storage report for DS-5db337a3-995f-48ba-b820-54abf98ee37f from datanode DatanodeRegistration(127.0.0.1:45353, datanodeUuid=72f28de5-dd1c-47d0-90a1-baeea8cd3857, infoPort=38145, infoSecurePort=0, ipcPort=33215, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988)
2024-12-09T05:47:44,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x73a3dfca5b7dfa08 with lease ID 0xf799d3f0f382f969: from storage DS-5db337a3-995f-48ba-b820-54abf98ee37f node DatanodeRegistration(127.0.0.1:45353, datanodeUuid=72f28de5-dd1c-47d0-90a1-baeea8cd3857, infoPort=38145, infoSecurePort=0, ipcPort=33215, storageInfo=lv=-57;cid=testClusterID;nsid=1715422273;c=1733723262988), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T05:47:44,704 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3
2024-12-09T05:47:44,769 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-09T05:47:44,815 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=378, ProcessCount=11, AvailableMemoryMB=8969
2024-12-09T05:47:44,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T05:47:44,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-09T05:47:44,884 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/zookeeper_0, clientPort=49886, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T05:47:44,894 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49886
2024-12-09T05:47:44,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:44,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:45,006 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:45,006 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:45,058 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:55034 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55034 dst: /127.0.0.1:42767
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:45,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-09T05:47:45,483 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T05:47:45,495 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05 with version=8
2024-12-09T05:47:45,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/hbase-staging
2024-12-09T05:47:45,570 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-09T05:47:45,756 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7f75e6015732:0 server-side Connection retries=45
2024-12-09T05:47:45,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:45,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:45,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T05:47:45,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:45,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T05:47:45,890 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T05:47:45,946 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-09T05:47:45,954 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-09T05:47:45,957 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T05:47:45,981 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 80644 (auto-detected)
2024-12-09T05:47:45,982 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-09T05:47:46,000 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33537
2024-12-09T05:47:46,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33537 connecting to ZooKeeper ensemble=127.0.0.1:49886
2024-12-09T05:47:46,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335370x0, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T05:47:46,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33537-0x100bd84f3d20000 connected
2024-12-09T05:47:46,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,080 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T05:47:46,084 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05, hbase.cluster.distributed=false
2024-12-09T05:47:46,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T05:47:46,110 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33537
2024-12-09T05:47:46,110 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33537
2024-12-09T05:47:46,110 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33537
2024-12-09T05:47:46,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33537
2024-12-09T05:47:46,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33537
2024-12-09T05:47:46,202 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7f75e6015732:0 server-side Connection retries=45
2024-12-09T05:47:46,203 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,204 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,204 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T05:47:46,204 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,204 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T05:47:46,206 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T05:47:46,208 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T05:47:46,209 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33997
2024-12-09T05:47:46,211 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33997 connecting to ZooKeeper ensemble=127.0.0.1:49886
2024-12-09T05:47:46,211 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,215 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339970x0, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T05:47:46,223 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339970x0, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T05:47:46,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33997-0x100bd84f3d20001 connected
2024-12-09T05:47:46,227 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T05:47:46,234 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T05:47:46,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T05:47:46,241 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T05:47:46,242 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33997
2024-12-09T05:47:46,242 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33997
2024-12-09T05:47:46,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33997
2024-12-09T05:47:46,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33997
2024-12-09T05:47:46,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33997
2024-12-09T05:47:46,258 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7f75e6015732:0 server-side Connection retries=45
2024-12-09T05:47:46,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,258 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T05:47:46,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,259 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T05:47:46,259 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T05:47:46,259 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T05:47:46,260 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44413
2024-12-09T05:47:46,262 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44413 connecting to ZooKeeper ensemble=127.0.0.1:49886
2024-12-09T05:47:46,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444130x0, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T05:47:46,272 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:444130x0, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T05:47:46,272 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44413-0x100bd84f3d20002 connected
2024-12-09T05:47:46,273 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T05:47:46,276 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T05:47:46,278 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T05:47:46,280 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T05:47:46,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44413
2024-12-09T05:47:46,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44413
2024-12-09T05:47:46,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44413
2024-12-09T05:47:46,285 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44413
2024-12-09T05:47:46,286 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44413
2024-12-09T05:47:46,300 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7f75e6015732:0 server-side Connection retries=45
2024-12-09T05:47:46,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,300 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T05:47:46,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T05:47:46,300 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T05:47:46,300 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T05:47:46,301 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T05:47:46,301 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37663
2024-12-09T05:47:46,303 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37663 connecting to ZooKeeper ensemble=127.0.0.1:49886
2024-12-09T05:47:46,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,306 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376630x0, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T05:47:46,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376630x0, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T05:47:46,311 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37663-0x100bd84f3d20003 connected
2024-12-09T05:47:46,312 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T05:47:46,312 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T05:47:46,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T05:47:46,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T05:47:46,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37663
2024-12-09T05:47:46,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37663
2024-12-09T05:47:46,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37663
2024-12-09T05:47:46,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37663
2024-12-09T05:47:46,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37663
2024-12-09T05:47:46,337 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7f75e6015732:33537
2024-12-09T05:47:46,338 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7f75e6015732,33537,1733723265612
2024-12-09T05:47:46,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T05:47:46,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T05:47:46,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T05:47:46,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T05:47:46,345 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7f75e6015732,33537,1733723265612
2024-12-09T05:47:46,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T05:47:46,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T05:47:46,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T05:47:46,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003,
quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:47:46,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:46,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:46,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:46,364 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:47:46,365 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7f75e6015732,33537,1733723265612 from backup master directory 2024-12-09T05:47:46,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:46,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:46,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:46,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7f75e6015732,33537,1733723265612 2024-12-09T05:47:46,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:46,368 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T05:47:46,368 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7f75e6015732,33537,1733723265612
2024-12-09T05:47:46,370 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-09T05:47:46,371 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-09T05:47:46,427 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/hbase.id] with ID: fbd63f94-6cc1-4c40-abcd-c633bb1e899f
2024-12-09T05:47:46,428 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/.tmp/hbase.id
2024-12-09T05:47:46,434 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,434 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,440 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:44954 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:43547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44954 dst: /127.0.0.1:43547
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775776_1004 (size=42)
2024-12-09T05:47:46,446 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T05:47:46,446 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/.tmp/hbase.id]:[hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/hbase.id]
2024-12-09T05:47:46,488 INFO [master/7f75e6015732:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T05:47:46,492 INFO [master/7f75e6015732:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T05:47:46,508 INFO [master/7f75e6015732:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms.
2024-12-09T05:47:46,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T05:47:46,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T05:47:46,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T05:47:46,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T05:47:46,522 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,522 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,525 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:50960 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:45353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50960 dst: /127.0.0.1:45353
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:46,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775760_1006 (size=196)
2024-12-09T05:47:46,532 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T05:47:46,546 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T05:47:46,548 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T05:47:46,552 INFO [master/7f75e6015732:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-09T05:47:46,578 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,578 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,580 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:44994 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44994 dst: /127.0.0.1:43547
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:46,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775744_1008 (size=1189)
2024-12-09T05:47:46,586 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
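The repeated DFSStripedOutputStream warnings above all point at the RS-3-2-1024k erasure coding policy and suggest verifying the cluster topology with 'hdfs ec -verifyClusterSetup'. Below is a minimal, illustrative Java sketch of the same sanity check, assuming the NameNode address hdfs://localhost:43973 and the test-data path quoted in the log; the class name EcSanityCheck and the use of the HDFS client API here are assumptions, not part of this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcSanityCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the log; adjust for a real cluster.
        try (DistributedFileSystem dfs = (DistributedFileSystem)
                new Path("hdfs://localhost:43973/").getFileSystem(conf)) {
            // Effective EC policy on the directory the test writes under, if any.
            ErasureCodingPolicy policy =
                dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
            DatanodeInfo[] live = dfs.getDataNodeStats();
            if (policy != null) {
                // RS-3-2 needs data + parity datanodes (5) to place a full block group.
                int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
                if (live.length < needed) {
                    System.out.printf("Policy %s needs %d datanodes, only %d live%n",
                        policy.getName(), needed, live.length);
                }
            }
        }
    }
}

In a 3-datanode mini cluster like the one in this log, such a check would report that RS-3-2-1024k cannot place its two parity blocks, which is consistent with the 'Cannot allocate parity block' warnings.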
2024-12-09T05:47:46,601 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store
2024-12-09T05:47:46,617 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,617 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:46,620 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:45000 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45000 dst: /127.0.0.1:43547
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:46,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775728_1010 (size=34)
2024-12-09T05:47:46,625 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T05:47:46,629 INFO [master/7f75e6015732:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-09T05:47:46,632 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T05:47:46,633 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T05:47:46,633 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:46,633 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:46,635 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T05:47:46,635 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:46,635 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
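The StoreHotnessProtector message above names the switch that would enable it. A minimal sketch of setting that property, assuming only the property name quoted in the log line; the class name, the value 10, and the standalone main method are illustrative and not part of this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HotnessProtectorConfig {
    public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();
        // Per the log message, any value > 0 enables StoreHotnessProtector;
        // 10 is an arbitrary illustrative limit on parallel puts per store.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
    }
}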
2024-12-09T05:47:46,636 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733723266633Disabling compacts and flushes for region at 1733723266633Disabling writes for close at 1733723266635 (+2 ms)Writing region close event to WAL at 1733723266635Closed at 1733723266635 2024-12-09T05:47:46,638 WARN [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/.initializing 2024-12-09T05:47:46,638 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/WALs/7f75e6015732,33537,1733723265612 2024-12-09T05:47:46,647 INFO [master/7f75e6015732:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T05:47:46,662 INFO [master/7f75e6015732:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C33537%2C1733723265612, suffix=, logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/WALs/7f75e6015732,33537,1733723265612, archiveDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/oldWALs, maxLogs=10 2024-12-09T05:47:46,698 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/WALs/7f75e6015732,33537,1733723265612/7f75e6015732%2C33537%2C1733723265612.1733723266667, exclude list is [], retry=0 2024-12-09T05:47:46,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:46,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45353,DS-96ca065a-8ec9-4473-8f16-a302329ae6ef,DISK] 2024-12-09T05:47:46,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42767,DS-7e92fe02-692d-4b13-a119-abdd27e295f8,DISK] 2024-12-09T05:47:46,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43547,DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882,DISK] 2024-12-09T05:47:46,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T05:47:46,753 INFO [master/7f75e6015732:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/WALs/7f75e6015732,33537,1733723265612/7f75e6015732%2C33537%2C1733723265612.1733723266667 2024-12-09T05:47:46,754 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33465:33465),(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:36941:36941)] 2024-12-09T05:47:46,755 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:47:46,755 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:46,758 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,758 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:47:46,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:46,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:46,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:47:46,819 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:46,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:46,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:47:46,823 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:46,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:46,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:47:46,827 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:46,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:46,828 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,831 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,832 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,842 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:47:46,846 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:46,852 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:47:46,854 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68329503, jitterRate=0.01818893849849701}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:47:46,860 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733723266769Initializing all the Stores at 1733723266771 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723266771Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723266772 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723266772Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723266772Cleaning up temporary data from old regions at 1733723266839 (+67 ms)Region opened successfully at 1733723266860 (+21 ms) 2024-12-09T05:47:46,861 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:47:46,893 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38d256fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:46,926 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T05:47:46,936 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:47:46,936 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:47:46,938 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T05:47:46,939 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T05:47:46,944 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-09T05:47:46,944 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:47:46,967 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T05:47:46,974 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:47:46,975 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:47:46,977 INFO [master/7f75e6015732:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:47:46,979 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:47:46,980 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:47:46,982 INFO [master/7f75e6015732:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:47:46,985 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:47:46,986 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:47:46,987 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:47:46,989 DEBUG [master/7f75e6015732:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:47:47,004 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:47:47,005 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,010 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7f75e6015732,33537,1733723265612, sessionid=0x100bd84f3d20000, setting cluster-up flag (Was=false) 2024-12-09T05:47:47,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-09T05:47:47,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,023 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:47:47,024 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7f75e6015732,33537,1733723265612 2024-12-09T05:47:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,033 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:47:47,035 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7f75e6015732,33537,1733723265612 2024-12-09T05:47:47,041 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T05:47:47,105 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:47,112 INFO [master/7f75e6015732:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T05:47:47,118 INFO [master/7f75e6015732:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T05:47:47,121 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(746): ClusterId : fbd63f94-6cc1-4c40-abcd-c633bb1e899f 2024-12-09T05:47:47,121 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(746): ClusterId : fbd63f94-6cc1-4c40-abcd-c633bb1e899f 2024-12-09T05:47:47,121 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(746): ClusterId : fbd63f94-6cc1-4c40-abcd-c633bb1e899f 2024-12-09T05:47:47,123 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:47:47,123 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:47:47,123 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:47:47,124 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7f75e6015732,33537,1733723265612 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:47:47,128 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:47:47,128 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:47:47,128 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:47:47,128 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:47:47,128 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:47:47,128 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:47:47,131 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:47:47,131 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:47:47,131 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:47:47,131 DEBUG [RS:1;7f75e6015732:44413 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b6b1a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:47,131 DEBUG [RS:2;7f75e6015732:37663 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dc2dec4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:47,132 DEBUG [RS:0;7f75e6015732:33997 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74ec1c6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:47,132 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:47,132 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:47,132 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:47,132 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:47,132 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7f75e6015732:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:47:47,133 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,133 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:47,133 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,143 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733723297143 2024-12-09T05:47:47,145 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:47:47,145 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;7f75e6015732:37663 2024-12-09T05:47:47,147 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:47:47,149 DEBUG [RS:1;7f75e6015732:44413 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7f75e6015732:44413 2024-12-09T05:47:47,150 INFO [RS:1;7f75e6015732:44413 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T05:47:47,150 INFO [RS:2;7f75e6015732:37663 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T05:47:47,150 INFO [RS:1;7f75e6015732:44413 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T05:47:47,150 INFO [RS:2;7f75e6015732:37663 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T05:47:47,150 DEBUG [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T05:47:47,150 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T05:47:47,150 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7f75e6015732:33997 2024-12-09T05:47:47,150 INFO [RS:0;7f75e6015732:33997 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T05:47:47,150 INFO [RS:0;7f75e6015732:33997 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T05:47:47,150 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T05:47:47,150 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:47:47,151 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:47:47,151 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:47:47,151 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:47:47,151 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:47,152 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:47:47,153 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(2659): reportForDuty to master=7f75e6015732,33537,1733723265612 with port=37663, startcode=1733723266299 2024-12-09T05:47:47,153 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(2659): reportForDuty to master=7f75e6015732,33537,1733723265612 with port=33997, startcode=1733723266172 2024-12-09T05:47:47,153 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(2659): reportForDuty to master=7f75e6015732,33537,1733723265612 with port=44413, startcode=1733723266257 2024-12-09T05:47:47,152 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:47,157 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:47:47,157 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,157 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:47:47,158 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:47:47,159 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:47:47,161 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:47:47,161 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:47:47,165 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.large.0-1733723267162,5,FailOnTimeoutGroup] 2024-12-09T05:47:47,166 DEBUG [RS:2;7f75e6015732:37663 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:47:47,166 DEBUG [RS:0;7f75e6015732:33997 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:47:47,166 DEBUG [RS:1;7f75e6015732:44413 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:47:47,167 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.small.0-1733723267165,5,FailOnTimeoutGroup] 2024-12-09T05:47:47,167 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,168 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:47:47,169 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,169 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,175 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:47,175 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:47,179 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:50990 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:45353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50990 dst: /127.0.0.1:45353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:47,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-09T05:47:47,192 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-12-09T05:47:47,195 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T05:47:47,195 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05 2024-12-09T05:47:47,207 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:47:47,207 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45573, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:47:47,208 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40725, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:47:47,208 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:47,208 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T05:47:47,215 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33537 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,218 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33537 {}] master.ServerManager(517): Registering regionserver=7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:55080 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55080 dst: /127.0.0.1:42767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:47,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-09T05:47:47,227 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T05:47:47,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:47,231 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33537 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7f75e6015732,44413,1733723266257 2024-12-09T05:47:47,231 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33537 {}] master.ServerManager(517): Registering regionserver=7f75e6015732,44413,1733723266257 2024-12-09T05:47:47,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:47:47,235 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05 2024-12-09T05:47:47,235 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43973 2024-12-09T05:47:47,235 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T05:47:47,236 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33537 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7f75e6015732,33997,1733723266172 2024-12-09T05:47:47,237 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33537 {}] master.ServerManager(517): Registering regionserver=7f75e6015732,33997,1733723266172 2024-12-09T05:47:47,237 DEBUG [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05 2024-12-09T05:47:47,237 DEBUG [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43973 2024-12-09T05:47:47,237 DEBUG [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T05:47:47,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:47:47,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:47:47,240 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05 
2024-12-09T05:47:47,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,240 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43973 2024-12-09T05:47:47,240 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T05:47:47,241 DEBUG [RS:2;7f75e6015732:37663 {}] zookeeper.ZKUtil(111): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:47,242 WARN [RS:2;7f75e6015732:37663 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:47:47,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T05:47:47,242 INFO [RS:2;7f75e6015732:37663 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T05:47:47,242 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,242 DEBUG [RS:1;7f75e6015732:44413 {}] zookeeper.ZKUtil(111): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7f75e6015732,44413,1733723266257 2024-12-09T05:47:47,242 WARN [RS:1;7f75e6015732:44413 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:47:47,243 INFO [RS:1;7f75e6015732:44413 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T05:47:47,243 DEBUG [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,44413,1733723266257 2024-12-09T05:47:47,244 DEBUG [RS:0;7f75e6015732:33997 {}] zookeeper.ZKUtil(111): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7f75e6015732,33997,1733723266172 2024-12-09T05:47:47,244 WARN [RS:0;7f75e6015732:33997 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T05:47:47,245 INFO [RS:0;7f75e6015732:33997 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T05:47:47,245 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7f75e6015732,37663,1733723266299] 2024-12-09T05:47:47,245 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7f75e6015732,44413,1733723266257] 2024-12-09T05:47:47,245 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7f75e6015732,33997,1733723266172] 2024-12-09T05:47:47,245 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,33997,1733723266172 2024-12-09T05:47:47,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T05:47:47,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:47,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:47:47,249 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:47:47,250 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:47,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:47:47,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:47:47,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:47,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T05:47:47,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740 2024-12-09T05:47:47,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740 2024-12-09T05:47:47,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T05:47:47,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T05:47:47,262 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T05:47:47,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T05:47:47,279 INFO [RS:0;7f75e6015732:33997 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:47:47,279 INFO [RS:2;7f75e6015732:37663 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:47:47,279 INFO [RS:1;7f75e6015732:44413 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:47:47,282 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:47:47,283 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60548707, jitterRate=-0.09775395691394806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:47:47,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733723267229Initializing all the Stores at 1733723267231 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723267231Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723267232 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723267232Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723267232Cleaning up temporary data from old regions at 1733723267261 (+29 ms)Region opened successfully at 1733723267285 (+24 ms) 2024-12-09T05:47:47,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:47:47,285 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T05:47:47,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T05:47:47,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 
ms 2024-12-09T05:47:47,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:47:47,287 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T05:47:47,287 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733723267285Disabling compacts and flushes for region at 1733723267285Disabling writes for close at 1733723267285Writing region close event to WAL at 1733723267286 (+1 ms)Closed at 1733723267287 (+1 ms) 2024-12-09T05:47:47,290 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:47,290 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T05:47:47,294 INFO [RS:1;7f75e6015732:44413 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:47:47,294 INFO [RS:0;7f75e6015732:33997 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:47:47,294 INFO [RS:2;7f75e6015732:37663 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:47:47,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:47:47,299 INFO [RS:0;7f75e6015732:33997 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:47:47,299 INFO [RS:2;7f75e6015732:37663 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:47:47,299 INFO [RS:1;7f75e6015732:44413 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:47:47,299 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,299 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,299 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:47,302 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T05:47:47,304 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T05:47:47,305 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T05:47:47,305 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:47:47,310 INFO [RS:0;7f75e6015732:33997 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T05:47:47,310 INFO [RS:1;7f75e6015732:44413 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T05:47:47,310 INFO [RS:2;7f75e6015732:37663 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T05:47:47,311 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:47:47,312 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,312 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,312 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:47,312 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,312 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,312 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:47,313 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,313 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7f75e6015732:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:47,314 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:47,314 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,314 DEBUG [RS:2;7f75e6015732:37663 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:47,314 DEBUG [RS:0;7f75e6015732:33997 {}] executor.ExecutorService(95): Starting executor 
service name=RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:47,314 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,315 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,315 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,315 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:47,315 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:47,315 DEBUG [RS:1;7f75e6015732:44413 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:47,316 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,37663,1733723266299-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,316 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33997,1733723266172-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T05:47:47,321 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,321 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,321 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,321 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,321 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,322 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,44413,1733723266257-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:47:47,335 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:47:47,335 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:47:47,337 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33997,1733723266172-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,337 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,37663,1733723266299-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,338 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,338 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,338 INFO [RS:2;7f75e6015732:37663 {}] regionserver.Replication(171): 7f75e6015732,37663,1733723266299 started 2024-12-09T05:47:47,338 INFO [RS:0;7f75e6015732:33997 {}] regionserver.Replication(171): 7f75e6015732,33997,1733723266172 started 2024-12-09T05:47:47,345 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:47:47,345 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,44413,1733723266257-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,345 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,345 INFO [RS:1;7f75e6015732:44413 {}] regionserver.Replication(171): 7f75e6015732,44413,1733723266257 started 2024-12-09T05:47:47,356 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:47,356 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1482): Serving as 7f75e6015732,33997,1733723266172, RpcServer on 7f75e6015732/172.17.0.2:33997, sessionid=0x100bd84f3d20001 2024-12-09T05:47:47,357 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:47:47,357 DEBUG [RS:0;7f75e6015732:33997 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7f75e6015732,33997,1733723266172 2024-12-09T05:47:47,357 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,33997,1733723266172' 2024-12-09T05:47:47,357 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:47:47,358 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:47:47,359 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,359 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:47:47,359 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1482): Serving as 7f75e6015732,37663,1733723266299, RpcServer on 7f75e6015732/172.17.0.2:37663, sessionid=0x100bd84f3d20003 2024-12-09T05:47:47,359 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:47:47,359 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:47:47,359 DEBUG [RS:0;7f75e6015732:33997 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7f75e6015732,33997,1733723266172 2024-12-09T05:47:47,359 DEBUG [RS:2;7f75e6015732:37663 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,359 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,33997,1733723266172' 2024-12-09T05:47:47,359 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,37663,1733723266299' 2024-12-09T05:47:47,360 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:47:47,360 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:47:47,360 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:47:47,360 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:47:47,361 DEBUG [RS:0;7f75e6015732:33997 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:47:47,361 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:47:47,361 DEBUG [RS:2;7f75e6015732:37663 {}] 
procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:47:47,361 INFO [RS:0;7f75e6015732:33997 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:47:47,361 DEBUG [RS:2;7f75e6015732:37663 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,361 INFO [RS:0;7f75e6015732:33997 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:47:47,361 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,37663,1733723266299' 2024-12-09T05:47:47,361 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:47:47,362 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:47:47,362 DEBUG [RS:2;7f75e6015732:37663 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:47:47,362 INFO [RS:2;7f75e6015732:37663 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:47:47,362 INFO [RS:2;7f75e6015732:37663 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:47:47,364 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:47,364 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(1482): Serving as 7f75e6015732,44413,1733723266257, RpcServer on 7f75e6015732/172.17.0.2:44413, sessionid=0x100bd84f3d20002 2024-12-09T05:47:47,364 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:47:47,364 DEBUG [RS:1;7f75e6015732:44413 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7f75e6015732,44413,1733723266257 2024-12-09T05:47:47,365 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,44413,1733723266257' 2024-12-09T05:47:47,365 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:47:47,365 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:47:47,366 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:47:47,366 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:47:47,366 DEBUG [RS:1;7f75e6015732:44413 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7f75e6015732,44413,1733723266257 2024-12-09T05:47:47,366 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,44413,1733723266257' 2024-12-09T05:47:47,366 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:47:47,367 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:47:47,367 DEBUG [RS:1;7f75e6015732:44413 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:47:47,367 INFO [RS:1;7f75e6015732:44413 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:47:47,367 INFO [RS:1;7f75e6015732:44413 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:47:47,462 WARN [7f75e6015732:33537 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T05:47:47,465 INFO [RS:2;7f75e6015732:37663 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T05:47:47,465 INFO [RS:0;7f75e6015732:33997 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T05:47:47,468 INFO [RS:1;7f75e6015732:44413 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T05:47:47,468 INFO [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C37663%2C1733723266299, suffix=, logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299, archiveDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs, maxLogs=32 2024-12-09T05:47:47,468 INFO [RS:0;7f75e6015732:33997 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C33997%2C1733723266172, suffix=, logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,33997,1733723266172, archiveDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs, maxLogs=32 2024-12-09T05:47:47,472 INFO [RS:1;7f75e6015732:44413 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C44413%2C1733723266257, suffix=, logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,44413,1733723266257, archiveDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs, maxLogs=32 2024-12-09T05:47:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-09T05:47:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-09T05:47:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-09T05:47:47,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-09T05:47:47,515 DEBUG [RS:2;7f75e6015732:37663 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299/7f75e6015732%2C37663%2C1733723266299.1733723267471, exclude list is [], retry=0 2024-12-09T05:47:47,515 DEBUG [RS:0;7f75e6015732:33997 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,33997,1733723266172/7f75e6015732%2C33997%2C1733723266172.1733723267471, exclude list is [], retry=0 2024-12-09T05:47:47,518 DEBUG [RS:1;7f75e6015732:44413 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,44413,1733723266257/7f75e6015732%2C44413%2C1733723266257.1733723267474, exclude list is [], retry=0 2024-12-09T05:47:47,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-09T05:47:47,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-09T05:47:47,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-09T05:47:47,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45353,DS-96ca065a-8ec9-4473-8f16-a302329ae6ef,DISK] 2024-12-09T05:47:47,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-09T05:47:47,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43547,DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882,DISK] 2024-12-09T05:47:47,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42767,DS-7e92fe02-692d-4b13-a119-abdd27e295f8,DISK] 2024-12-09T05:47:47,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42767,DS-7e92fe02-692d-4b13-a119-abdd27e295f8,DISK] 2024-12-09T05:47:47,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45353,DS-96ca065a-8ec9-4473-8f16-a302329ae6ef,DISK] 2024-12-09T05:47:47,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43547,DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882,DISK] 2024-12-09T05:47:47,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45353,DS-96ca065a-8ec9-4473-8f16-a302329ae6ef,DISK] 2024-12-09T05:47:47,526 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42767,DS-7e92fe02-692d-4b13-a119-abdd27e295f8,DISK] 2024-12-09T05:47:47,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43547,DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882,DISK] 2024-12-09T05:47:47,535 INFO [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299/7f75e6015732%2C37663%2C1733723266299.1733723267471 2024-12-09T05:47:47,536 INFO [RS:0;7f75e6015732:33997 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,33997,1733723266172/7f75e6015732%2C33997%2C1733723266172.1733723267471 2024-12-09T05:47:47,537 DEBUG [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36941:36941),(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:33465:33465)] 2024-12-09T05:47:47,537 DEBUG [RS:0;7f75e6015732:33997 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:33465:33465),(127.0.0.1/127.0.0.1:36941:36941)] 2024-12-09T05:47:47,538 INFO [RS:1;7f75e6015732:44413 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,44413,1733723266257/7f75e6015732%2C44413%2C1733723266257.1733723267474 2024-12-09T05:47:47,538 DEBUG [RS:1;7f75e6015732:44413 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:33465:33465),(127.0.0.1/127.0.0.1:36941:36941)] 2024-12-09T05:47:47,716 DEBUG [7f75e6015732:33537 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T05:47:47,727 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(204): Hosts are {7f75e6015732=0} racks are {/default-rack=0} 2024-12-09T05:47:47,732 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T05:47:47,732 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T05:47:47,732 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T05:47:47,732 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T05:47:47,732 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T05:47:47,732 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T05:47:47,732 INFO [7f75e6015732:33537 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T05:47:47,732 INFO [7f75e6015732:33537 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T05:47:47,732 INFO [7f75e6015732:33537 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T05:47:47,733 DEBUG [7f75e6015732:33537 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T05:47:47,738 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7f75e6015732,37663,1733723266299 2024-12-09T05:47:47,745 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7f75e6015732,37663,1733723266299, state=OPENING 2024-12-09T05:47:47,749 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:47:47,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:47,751 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:47,751 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:47,751 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:47,752 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:47,753 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:47:47,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7f75e6015732,37663,1733723266299}] 2024-12-09T05:47:47,937 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:47:47,939 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54863, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:47:47,952 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T05:47:47,952 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T05:47:47,953 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T05:47:47,956 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C37663%2C1733723266299.meta, suffix=.meta, logDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299, archiveDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs, maxLogs=32 2024-12-09T05:47:47,973 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299/7f75e6015732%2C37663%2C1733723266299.meta.1733723267958.meta, exclude list is [], retry=0 2024-12-09T05:47:47,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45353,DS-96ca065a-8ec9-4473-8f16-a302329ae6ef,DISK] 2024-12-09T05:47:47,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43547,DS-b9a4b662-e2c0-442f-acd9-a3635b1b7882,DISK] 2024-12-09T05:47:47,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42767,DS-7e92fe02-692d-4b13-a119-abdd27e295f8,DISK] 2024-12-09T05:47:47,980 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/WALs/7f75e6015732,37663,1733723266299/7f75e6015732%2C37663%2C1733723266299.meta.1733723267958.meta 2024-12-09T05:47:47,980 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:33465:33465),(127.0.0.1/127.0.0.1:36941:36941)] 2024-12-09T05:47:47,981 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:47:47,982 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:47:47,984 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:47:47,988 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T05:47:47,991 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:47:47,992 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:47,992 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T05:47:47,992 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T05:47:47,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:47:47,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:47:47,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:47,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T05:47:47,998 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T05:47:47,998 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:47,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:47,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:47:48,001 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:47:48,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:48,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:48,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:47:48,003 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:47:48,003 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:48,004 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
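The repeated CompactionConfiguration lines above show every column family of hbase:meta picking up the same compaction settings: minFilesToCompact:3, maxFilesToCompact:10 and a size ratio of 1.2. As a rough illustration of how such a ratio is used when choosing store files for a minor compaction, the sketch below keeps a candidate file only if it is no larger than ratio times the combined size of the other candidates. This is a simplified stand-in for the idea, not the actual ExploringCompactionPolicy code referenced in the log.

import java.util.ArrayList;
import java.util.List;

// Simplified size-ratio check in the spirit of HBase minor-compaction selection.
// Constants mirror the log: minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2.
public class RatioSelectionSketch {
    static final int MIN_FILES = 3;
    static final int MAX_FILES = 10;
    static final double RATIO = 1.2;

    // Returns the subset of store file sizes that passes the ratio test,
    // or an empty list if too few files qualify for a compaction.
    static List<Long> select(List<Long> sizes) {
        List<Long> picked = new ArrayList<>();
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            long others = total - size;
            if (size <= RATIO * others && picked.size() < MAX_FILES) {
                picked.add(size);
            }
        }
        return picked.size() >= MIN_FILES ? picked : List.of();
    }

    public static void main(String[] args) {
        // A 100 MB file next to three 10 MB files fails the ratio test and is skipped.
        System.out.println(select(List.of(100L << 20, 10L << 20, 10L << 20, 10L << 20)));
    }
}

With the sample sizes above, the lone 100 MB file is excluded and only the three small files are compacted together, which is the intent of the ratio: minor compactions avoid repeatedly rewriting one already-large file.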
2024-12-09T05:47:48,004 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T05:47:48,006 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740 2024-12-09T05:47:48,008 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740 2024-12-09T05:47:48,010 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T05:47:48,010 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T05:47:48,011 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:47:48,013 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T05:47:48,015 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65817072, jitterRate=-0.01924920082092285}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:47:48,015 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T05:47:48,016 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733723267992Writing region info on filesystem at 1733723267992Initializing all the Stores at 1733723267994 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723267994Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723267994Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723267994Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723267994Cleaning up temporary data from old regions at 1733723268010 (+16 ms)Running coprocessor post-open hooks at 1733723268015 (+5 ms)Region opened successfully at 1733723268016 (+1 ms) 2024-12-09T05:47:48,022 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733723267927 2024-12-09T05:47:48,031 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:47:48,032 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T05:47:48,034 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7f75e6015732,37663,1733723266299 2024-12-09T05:47:48,036 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7f75e6015732,37663,1733723266299, state=OPEN 2024-12-09T05:47:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:48,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:48,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:48,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:48,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:48,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:48,038 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=7f75e6015732,37663,1733723266299 2024-12-09T05:47:48,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:47:48,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7f75e6015732,37663,1733723266299 in 283 msec 2024-12-09T05:47:48,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:47:48,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 751 msec 2024-12-09T05:47:48,053 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:48,053 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T05:47:48,070 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T05:47:48,072 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7f75e6015732,37663,1733723266299, seqNum=-1] 2024-12-09T05:47:48,091 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:47:48,093 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39087, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:47:48,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0430 sec 2024-12-09T05:47:48,129 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733723268129, completionTime=-1 2024-12-09T05:47:48,132 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T05:47:48,132 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-09T05:47:48,155 INFO [master/7f75e6015732:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T05:47:48,155 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733723328155 2024-12-09T05:47:48,156 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733723388155 2024-12-09T05:47:48,156 INFO [master/7f75e6015732:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 23 msec 2024-12-09T05:47:48,157 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T05:47:48,166 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33537,1733723265612-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,167 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33537,1733723265612-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,167 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33537,1733723265612-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,168 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7f75e6015732:33537, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,169 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,171 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,175 DEBUG [master/7f75e6015732:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T05:47:48,201 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.832sec 2024-12-09T05:47:48,202 INFO [master/7f75e6015732:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:47:48,203 INFO [master/7f75e6015732:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:47:48,204 INFO [master/7f75e6015732:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:47:48,204 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T05:47:48,204 INFO [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:47:48,205 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33537,1733723265612-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:47:48,205 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33537,1733723265612-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:47:48,210 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:47:48,211 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:47:48,211 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,33537,1733723265612-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:48,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f32acaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:47:48,243 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T05:47:48,244 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T05:47:48,247 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7f75e6015732,33537,-1 for getting cluster id 2024-12-09T05:47:48,250 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T05:47:48,258 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fbd63f94-6cc1-4c40-abcd-c633bb1e899f' 2024-12-09T05:47:48,260 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T05:47:48,261 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fbd63f94-6cc1-4c40-abcd-c633bb1e899f" 2024-12-09T05:47:48,261 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@666de584, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:47:48,261 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7f75e6015732,33537,-1] 2024-12-09T05:47:48,264 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T05:47:48,266 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:48,267 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-09T05:47:48,270 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9fe7c2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:47:48,271 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T05:47:48,278 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7f75e6015732,37663,1733723266299, seqNum=-1] 2024-12-09T05:47:48,279 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:47:48,281 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:47:48,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7f75e6015732,33537,1733723265612 2024-12-09T05:47:48,306 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T05:47:48,311 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7f75e6015732,33537,1733723265612 2024-12-09T05:47:48,313 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@612235af 2024-12-09T05:47:48,314 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:47:48,316 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56976, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:47:48,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:47:48,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T05:47:48,332 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:47:48,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T05:47:48,334 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T05:47:48,337 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-09T05:47:48,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T05:47:48,346 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:48,346 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:48,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:55168 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55168 dst: /127.0.0.1:42767
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:48,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775680_1021 (size=392)
2024-12-09T05:47:48,360 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
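The parity-block warnings above are expected on this minicluster: RS-3-2-1024k stripes each block group into 3 data and 2 parity blocks, so fully placing a group wants five datanodes, while this log shows only three (127.0.0.1:42767, 127.0.0.1:45353, 127.0.0.1:43547); two blocks per group therefore cannot be written, which is exactly the "failed to write 2 blocks" warning. The log itself points at 'hdfs ec -verifyClusterSetup' for diagnosing the topology. As a sketch (assuming the Hadoop 3 client API DistributedFileSystem#getErasureCodingPolicy; the URI and path below are placeholders echoing the log, not part of the test), the effective EC policy of a directory can also be inspected programmatically:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Sketch: report which EC policy applies to a directory and how many datanodes
// a fully placed block group would need. URI and path are placeholders.
public class EcPolicySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                 (DistributedFileSystem) DistributedFileSystem.get(
                     URI.create("hdfs://localhost:43973"), conf)) {
            Path dir = new Path("/user/jenkins/test-data");
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            if (policy == null) {
                System.out.println(dir + " uses plain replication");
            } else {
                System.out.println(dir + " uses " + policy.getName() + ": needs "
                    + (policy.getNumDataUnits() + policy.getNumParityUnits())
                    + " datanodes for a fully placed block group");
            }
        }
    }
}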
2024-12-09T05:47:48,364 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9efb050931efd7fd9f2558c289bc039c, NAME => 'TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05
2024-12-09T05:47:48,370 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:48,371 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T05:47:48,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:55198 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55198 dst: /127.0.0.1:42767
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T05:47:48,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775664_1023 (size=51)
2024-12-09T05:47:48,382 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T05:47:48,382 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:48,382 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 9efb050931efd7fd9f2558c289bc039c, disabling compactions & flushes 2024-12-09T05:47:48,382 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. after waiting 0 ms 2024-12-09T05:47:48,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,383 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,383 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9efb050931efd7fd9f2558c289bc039c: Waiting for close lock at 1733723268382Disabling compacts and flushes for region at 1733723268382Disabling writes for close at 1733723268383 (+1 ms)Writing region close event to WAL at 1733723268383Closed at 1733723268383 2024-12-09T05:47:48,385 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:47:48,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733723268385"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733723268385"}]},"ts":"1733723268385"} 2024-12-09T05:47:48,395 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T05:47:48,397 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:47:48,400 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733723268397"}]},"ts":"1733723268397"} 2024-12-09T05:47:48,404 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T05:47:48,405 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {7f75e6015732=0} racks are {/default-rack=0} 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T05:47:48,407 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T05:47:48,407 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T05:47:48,407 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T05:47:48,407 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T05:47:48,408 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9efb050931efd7fd9f2558c289bc039c, ASSIGN}] 2024-12-09T05:47:48,411 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9efb050931efd7fd9f2558c289bc039c, ASSIGN 2024-12-09T05:47:48,413 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9efb050931efd7fd9f2558c289bc039c, ASSIGN; state=OFFLINE, location=7f75e6015732,33997,1733723266172; forceNewPlan=false, retain=false 2024-12-09T05:47:48,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:48,566 INFO [7f75e6015732:33537 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
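From the client's point of view, the assignment above is just a wait: the create call completes once pid=4 (and its ASSIGN subprocedure pid=5) finish, and the repeated "Checking to see if procedure is done pid=4" lines are the master answering the client's polling. A stand-alone sketch of such a wait, assuming the public Admin API and an illustrative 60-second deadline:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForTableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          long deadline = System.currentTimeMillis() + 60_000;   // illustrative timeout
          while (!admin.isTableAvailable(tn)) {                  // true once every region is OPEN
            if (System.currentTimeMillis() > deadline) {
              throw new IllegalStateException("regions of " + tn + " not assigned in time");
            }
            Thread.sleep(100);
          }
        }
      }
    }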
2024-12-09T05:47:48,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9efb050931efd7fd9f2558c289bc039c, regionState=OPENING, regionLocation=7f75e6015732,33997,1733723266172 2024-12-09T05:47:48,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9efb050931efd7fd9f2558c289bc039c, ASSIGN because future has completed 2024-12-09T05:47:48,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9efb050931efd7fd9f2558c289bc039c, server=7f75e6015732,33997,1733723266172}] 2024-12-09T05:47:48,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:48,731 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:47:48,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37619, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:47:48,744 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,744 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9efb050931efd7fd9f2558c289bc039c, NAME => 'TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:47:48,745 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,745 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:48,745 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,746 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,749 INFO [StoreOpener-9efb050931efd7fd9f2558c289bc039c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,751 INFO [StoreOpener-9efb050931efd7fd9f2558c289bc039c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9efb050931efd7fd9f2558c289bc039c columnFamilyName cf 2024-12-09T05:47:48,751 DEBUG [StoreOpener-9efb050931efd7fd9f2558c289bc039c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:48,752 INFO [StoreOpener-9efb050931efd7fd9f2558c289bc039c-1 {}] regionserver.HStore(327): Store=9efb050931efd7fd9f2558c289bc039c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:48,752 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,753 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,754 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,754 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,754 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,757 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,762 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:47:48,763 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9efb050931efd7fd9f2558c289bc039c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69783729, jitterRate=0.03985859453678131}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:47:48,763 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:48,764 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9efb050931efd7fd9f2558c289bc039c: Running coprocessor pre-open hook at 1733723268746Writing region info on filesystem at 1733723268746Initializing all the Stores at 1733723268748 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723268748Cleaning up temporary data from old regions at 1733723268754 (+6 ms)Running coprocessor post-open hooks at 1733723268763 (+9 ms)Region opened successfully at 1733723268764 (+1 ms) 2024-12-09T05:47:48,766 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c., pid=6, masterSystemTime=1733723268730 2024-12-09T05:47:48,769 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,769 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:48,770 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9efb050931efd7fd9f2558c289bc039c, regionState=OPEN, openSeqNum=2, regionLocation=7f75e6015732,33997,1733723266172 2024-12-09T05:47:48,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9efb050931efd7fd9f2558c289bc039c, server=7f75e6015732,33997,1733723266172 because future has completed 2024-12-09T05:47:48,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:47:48,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9efb050931efd7fd9f2558c289bc039c, server=7f75e6015732,33997,1733723266172 in 201 msec 2024-12-09T05:47:48,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:47:48,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9efb050931efd7fd9f2558c289bc039c, ASSIGN in 371 msec 2024-12-09T05:47:48,784 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:47:48,784 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733723268784"}]},"ts":"1733723268784"} 2024-12-09T05:47:48,787 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T05:47:48,788 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:47:48,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 463 msec 2024-12-09T05:47:48,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:48,967 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T05:47:48,967 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T05:47:48,969 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:47:48,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T05:47:48,976 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:47:48,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T05:47:48,986 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c., hostname=7f75e6015732,33997,1733723266172, seqNum=2] 2024-12-09T05:47:48,987 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:47:48,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:47:48,997 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T05:47:49,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T05:47:49,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T05:47:49,006 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:47:49,007 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:47:49,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:47:49,115 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T05:47:49,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33997 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T05:47:49,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:49,179 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 9efb050931efd7fd9f2558c289bc039c 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T05:47:49,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/.tmp/cf/5fb9d6a1dbd34dc8a0b9787457f8d1b8 is 36, key is row/cf:cq/1733723268990/Put/seqid=0 2024-12-09T05:47:49,232 WARN [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,232 WARN [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,235 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1923868789_22 at /127.0.0.1:51552 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51552 dst: /127.0.0.1:42767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-09T05:47:49,241 WARN [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T05:47:49,241 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/.tmp/cf/5fb9d6a1dbd34dc8a0b9787457f8d1b8 2024-12-09T05:47:49,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/.tmp/cf/5fb9d6a1dbd34dc8a0b9787457f8d1b8 as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/cf/5fb9d6a1dbd34dc8a0b9787457f8d1b8 2024-12-09T05:47:49,289 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/cf/5fb9d6a1dbd34dc8a0b9787457f8d1b8, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T05:47:49,295 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 9efb050931efd7fd9f2558c289bc039c in 117ms, sequenceid=5, compaction requested=false 2024-12-09T05:47:49,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-09T05:47:49,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 9efb050931efd7fd9f2558c289bc039c: 2024-12-09T05:47:49,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 
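pid=7/pid=8 above are the server side of a single Put followed by an explicit flush: the HFileWriterImpl line names the one cell written (row/cf:cq), and the flush commits a ~4.7 K HFile, again with the same EC parity warnings but with the file landing successfully on the datanodes that are available. A hedged sketch of the corresponding client-side calls; the cell value is arbitrary, since the log only records its size:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlushSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(tn);   // submits FlushTableProcedure -> FlushRegionProcedure (pid=7/pid=8)
        }
      }
    }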
2024-12-09T05:47:49,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T05:47:49,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T05:47:49,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T05:47:49,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 294 msec 2024-12-09T05:47:49,311 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 309 msec 2024-12-09T05:47:49,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33537 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T05:47:49,325 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T05:47:49,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T05:47:49,339 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T05:47:49,339 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:49,343 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,344 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T05:47:49,344 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:47:49,344 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1296338422, stopped=false 2024-12-09T05:47:49,344 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7f75e6015732,33537,1733723265612 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, 
quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:49,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:49,346 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T05:47:49,346 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:49,346 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T05:47:49,347 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:49,347 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:49,347 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:49,347 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:49,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,347 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7f75e6015732,33997,1733723266172' ***** 2024-12-09T05:47:49,347 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T05:47:49,347 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7f75e6015732,44413,1733723266257' ***** 2024-12-09T05:47:49,347 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T05:47:49,347 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7f75e6015732,37663,1733723266299' ***** 2024-12-09T05:47:49,348 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T05:47:49,348 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:47:49,348 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:47:49,348 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T05:47:49,348 INFO [RS:0;7f75e6015732:33997 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
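All of the JUnit call stacks above bottom out in TestHBaseWalOnEC.tearDown (TestHBaseWalOnEC.java:101), which hands control to HBaseTestingUtil.shutdownMiniCluster to stop the master, the three region servers, and the backing DFS/ZooKeeper processes. A hedged sketch of such a teardown hook follows; the field name, constructor usage, and the choice of JUnit annotation are assumptions, and only the shutdown call itself appears in the stack trace:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;

    public class MiniClusterTeardownSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @AfterClass
      public static void tearDown() throws Exception {
        // Closes the shared async connection, then shuts down HBase and the mini DFS/ZK cluster.
        UTIL.shutdownMiniCluster();
      }
    }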
2024-12-09T05:47:49,348 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:47:49,348 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T05:47:49,348 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T05:47:49,348 INFO [RS:2;7f75e6015732:37663 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:47:49,348 INFO [RS:0;7f75e6015732:33997 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(959): stopping server 7f75e6015732,37663,1733723266299 2024-12-09T05:47:49,349 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(3091): Received CLOSE for 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T05:47:49,349 INFO [RS:1;7f75e6015732:44413 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;7f75e6015732:37663. 2024-12-09T05:47:49,349 INFO [RS:1;7f75e6015732:44413 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:47:49,349 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(959): stopping server 7f75e6015732,44413,1733723266257 2024-12-09T05:47:49,349 INFO [RS:1;7f75e6015732:44413 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T05:47:49,349 DEBUG [RS:2;7f75e6015732:37663 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:49,349 INFO [RS:1;7f75e6015732:44413 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7f75e6015732:44413. 
2024-12-09T05:47:49,349 DEBUG [RS:2;7f75e6015732:37663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,349 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(959): stopping server 7f75e6015732,33997,1733723266172 2024-12-09T05:47:49,349 DEBUG [RS:1;7f75e6015732:44413 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:49,349 INFO [RS:0;7f75e6015732:33997 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T05:47:49,349 DEBUG [RS:1;7f75e6015732:44413 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,349 INFO [RS:0;7f75e6015732:33997 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7f75e6015732:33997. 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-09T05:47:49,349 DEBUG [RS:0;7f75e6015732:33997 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:49,349 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(976): stopping server 7f75e6015732,44413,1733723266257; all regions closed. 2024-12-09T05:47:49,349 INFO [RS:2;7f75e6015732:37663 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:47:49,350 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T05:47:49,350 DEBUG [RS:0;7f75e6015732:33997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,350 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9efb050931efd7fd9f2558c289bc039c, disabling compactions & flushes 2024-12-09T05:47:49,350 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T05:47:49,350 INFO [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:49,350 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1325): Online Regions={9efb050931efd7fd9f2558c289bc039c=TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c.} 2024-12-09T05:47:49,350 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:49,350 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 
after waiting 0 ms 2024-12-09T05:47:49,350 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T05:47:49,350 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T05:47:49,350 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:49,350 DEBUG [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1351): Waiting on 9efb050931efd7fd9f2558c289bc039c 2024-12-09T05:47:49,350 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:47:49,350 DEBUG [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T05:47:49,350 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T05:47:49,350 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T05:47:49,351 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:47:49,351 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:47:49,351 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T05:47:49,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_1073741828_1018 (size=93) 2024-12-09T05:47:49,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_1073741828_1018 (size=93) 2024-12-09T05:47:49,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741828_1018 (size=93) 2024-12-09T05:47:49,363 INFO [regionserver/7f75e6015732:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T05:47:49,363 INFO [regionserver/7f75e6015732:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T05:47:49,367 DEBUG [RS:1;7f75e6015732:44413 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs 2024-12-09T05:47:49,367 INFO [RS:1;7f75e6015732:44413 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7f75e6015732%2C44413%2C1733723266257:(num 1733723267474) 2024-12-09T05:47:49,367 DEBUG [RS:1;7f75e6015732:44413 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,367 INFO [RS:1;7f75e6015732:44413 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:47:49,367 INFO [RS:1;7f75e6015732:44413 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T05:47:49,368 INFO [RS:1;7f75e6015732:44413 {}] hbase.ChoreService(370): Chore service for: regionserver/7f75e6015732:0 had [ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T05:47:49,368 INFO [RS:1;7f75e6015732:44413 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:47:49,368 INFO [regionserver/7f75e6015732:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T05:47:49,368 INFO [RS:1;7f75e6015732:44413 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:47:49,368 INFO [RS:1;7f75e6015732:44413 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T05:47:49,368 INFO [RS:1;7f75e6015732:44413 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T05:47:49,369 INFO [RS:1;7f75e6015732:44413 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44413 2024-12-09T05:47:49,369 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T05:47:49,371 INFO [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 2024-12-09T05:47:49,371 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9efb050931efd7fd9f2558c289bc039c: Waiting for close lock at 1733723269349Running coprocessor pre-close hooks at 1733723269350 (+1 ms)Disabling compacts and flushes for region at 1733723269350Disabling writes for close at 1733723269350Writing region close event to WAL at 1733723269352 (+2 ms)Running coprocessor post-close hooks at 1733723269370 (+18 ms)Closed at 1733723269370 2024-12-09T05:47:49,371 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c. 
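Closing the region writes a new seqid marker under recovered.edits (8.seqid, newMaxSeqId=8) next to the store file flushed earlier, which a later open consults to pick its next sequence id. A small sketch that simply lists that region directory over HDFS; the paths are copied from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListRegionDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:43973");   // NameNode from the log
        Path regionDir = new Path("/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/"
            + "data/default/TestHBaseWalOnEC/9efb050931efd7fd9f2558c289bc039c");
        try (FileSystem fs = FileSystem.get(conf)) {
          for (FileStatus status : fs.listStatus(regionDir)) {  // expect .regioninfo, cf/, recovered.edits/
            System.out.println(status.getPath() + "  len=" + status.getLen());
          }
        }
      }
    }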
2024-12-09T05:47:49,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:47:49,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7f75e6015732,44413,1733723266257 2024-12-09T05:47:49,372 INFO [RS:1;7f75e6015732:44413 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T05:47:49,373 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7f75e6015732,44413,1733723266257] 2024-12-09T05:47:49,374 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7f75e6015732,44413,1733723266257 already deleted, retry=false 2024-12-09T05:47:49,374 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7f75e6015732,44413,1733723266257 expired; onlineServers=2 2024-12-09T05:47:49,385 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/info/f63a8f5e59b8416db9a1d471abbff920 is 153, key is TestHBaseWalOnEC,,1733723268318.9efb050931efd7fd9f2558c289bc039c./info:regioninfo/1733723268769/Put/seqid=0 2024-12-09T05:47:49,388 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,388 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,392 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-810890660_22 at /127.0.0.1:47606 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:43547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47606 dst: /127.0.0.1:43547 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-09T05:47:49,396 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T05:47:49,396 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/info/f63a8f5e59b8416db9a1d471abbff920 2024-12-09T05:47:49,424 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/ns/3162f2cb7cce4d8585517b23d40b673f is 43, key is default/ns:d/1733723268097/Put/seqid=0 2024-12-09T05:47:49,426 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,426 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,429 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-810890660_22 at /127.0.0.1:51566 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51566 dst: /127.0.0.1:42767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,430 INFO [regionserver/7f75e6015732:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:47:49,430 INFO [regionserver/7f75e6015732:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:47:49,430 INFO [regionserver/7f75e6015732:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:47:49,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-09T05:47:49,434 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T05:47:49,434 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/ns/3162f2cb7cce4d8585517b23d40b673f 2024-12-09T05:47:49,457 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/table/5281b970b0044ecea19532e913063e61 is 52, key is TestHBaseWalOnEC/table:state/1733723268784/Put/seqid=0 2024-12-09T05:47:49,459 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,459 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,461 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-810890660_22 at /127.0.0.1:47628 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47628 dst: /127.0.0.1:43547 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-09T05:47:49,466 WARN [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T05:47:49,466 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/table/5281b970b0044ecea19532e913063e61 2024-12-09T05:47:49,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44413-0x100bd84f3d20002, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,474 INFO [RS:1;7f75e6015732:44413 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T05:47:49,474 INFO [RS:1;7f75e6015732:44413 {}] regionserver.HRegionServer(1031): Exiting; stopping=7f75e6015732,44413,1733723266257; zookeeper connection closed. 
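The repeated "Cannot allocate parity block(index=3, policy=RS-3-2-1024k)" / "(index=4, ...)" warnings above are a capacity mismatch rather than a data fault: RS-3-2 stripes each block group across 3 data plus 2 parity blocks and therefore needs at least 5 datanodes, while this mini cluster runs only 3, so the two parity indices (3 and 4) cannot be placed and each flush is followed by "Block group <1> failed to write 2 blocks". The log itself points at 'hdfs ec -verifyClusterSetup'; the sketch below is an equivalent check through the HDFS client API, assuming the NameNode address from this run and an illustrative path. It is not part of the test source.

    // Editorial sketch only: compares an EC policy's block-group width against
    // the number of datanodes the NameNode reports.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcCapacityCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the run above; adjust as needed.
        conf.set("fs.defaultFS", "hdfs://localhost:43973");
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Illustrative path (assumed): any directory under the EC policy works.
          Path dir = new Path("/user/jenkins/test-data");
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          int reportedDataNodes = dfs.getDataNodeStats().length;
          if (policy != null) {
            int required = policy.getNumDataUnits() + policy.getNumParityUnits();
            // RS-3-2 requires 3 + 2 = 5 datanodes; the mini cluster above runs 3,
            // which is why parity blocks at indices 3 and 4 cannot be placed.
            System.out.printf("policy=%s requires %d datanodes, reported=%d%n",
                policy.getName(), required, reportedDataNodes);
          }
        }
      }
    }

On a 3-datanode cluster a narrower built-in policy such as XOR-2-1-1024k (2 data + 1 parity) would fit; that substitution is mentioned only for illustration, the test itself keeps RS-3-2-1024k.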
2024-12-09T05:47:49,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b943dfd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b943dfd 2024-12-09T05:47:49,477 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/info/f63a8f5e59b8416db9a1d471abbff920 as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/info/f63a8f5e59b8416db9a1d471abbff920 2024-12-09T05:47:49,487 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/info/f63a8f5e59b8416db9a1d471abbff920, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T05:47:49,489 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/ns/3162f2cb7cce4d8585517b23d40b673f as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/ns/3162f2cb7cce4d8585517b23d40b673f 2024-12-09T05:47:49,500 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/ns/3162f2cb7cce4d8585517b23d40b673f, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T05:47:49,502 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/.tmp/table/5281b970b0044ecea19532e913063e61 as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/table/5281b970b0044ecea19532e913063e61 2024-12-09T05:47:49,513 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/table/5281b970b0044ecea19532e913063e61, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T05:47:49,515 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 164ms, sequenceid=11, compaction requested=false 2024-12-09T05:47:49,515 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T05:47:49,525 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T05:47:49,527 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T05:47:49,527 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T05:47:49,527 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733723269350Running coprocessor pre-close hooks at 1733723269350Disabling compacts and flushes for region at 1733723269350Disabling writes for close at 1733723269351 (+1 ms)Obtaining lock to block concurrent updates at 1733723269351Preparing flush snapshotting stores in 1588230740 at 1733723269351Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733723269352 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733723269353 (+1 ms)Flushing 1588230740/info: creating writer at 1733723269354 (+1 ms)Flushing 1588230740/info: appending metadata at 1733723269382 (+28 ms)Flushing 1588230740/info: closing flushed file at 1733723269382Flushing 1588230740/ns: creating writer at 1733723269406 (+24 ms)Flushing 1588230740/ns: appending metadata at 1733723269423 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733723269423Flushing 1588230740/table: creating writer at 1733723269442 (+19 ms)Flushing 1588230740/table: appending metadata at 1733723269456 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733723269456Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53926fd9: reopening flushed file at 1733723269475 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fe12cdb: reopening flushed file at 1733723269488 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@409e8c39: reopening flushed file at 1733723269501 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 164ms, sequenceid=11, compaction requested=false at 1733723269515 (+14 ms)Writing region close event to WAL at 1733723269518 (+3 ms)Running coprocessor post-close hooks at 1733723269526 (+8 ms)Closed at 1733723269527 (+1 ms) 2024-12-09T05:47:49,527 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T05:47:49,551 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(976): stopping server 7f75e6015732,33997,1733723266172; all regions closed. 2024-12-09T05:47:49,551 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(976): stopping server 7f75e6015732,37663,1733723266299; all regions closed. 
2024-12-09T05:47:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_1073741826_1016 (size=1298) 2024-12-09T05:47:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741829_1019 (size=2751) 2024-12-09T05:47:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741826_1016 (size=1298) 2024-12-09T05:47:49,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_1073741829_1019 (size=2751) 2024-12-09T05:47:49,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_1073741826_1016 (size=1298) 2024-12-09T05:47:49,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_1073741829_1019 (size=2751) 2024-12-09T05:47:49,559 DEBUG [RS:0;7f75e6015732:33997 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs 2024-12-09T05:47:49,559 INFO [RS:0;7f75e6015732:33997 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7f75e6015732%2C33997%2C1733723266172:(num 1733723267471) 2024-12-09T05:47:49,560 DEBUG [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs 2024-12-09T05:47:49,560 DEBUG [RS:0;7f75e6015732:33997 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:47:49,560 INFO [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7f75e6015732%2C37663%2C1733723266299.meta:.meta(num 1733723267958) 2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] hbase.ChoreService(370): Chore service for: regionserver/7f75e6015732:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:47:49,560 INFO [regionserver/7f75e6015732:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T05:47:49,560 INFO [RS:0;7f75e6015732:33997 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T05:47:49,561 INFO [RS:0;7f75e6015732:33997 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33997 2024-12-09T05:47:49,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:47:49,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7f75e6015732,33997,1733723266172 2024-12-09T05:47:49,562 INFO [RS:0;7f75e6015732:33997 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T05:47:49,564 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7f75e6015732,33997,1733723266172] 2024-12-09T05:47:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_1073741827_1017 (size=93) 2024-12-09T05:47:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741827_1017 (size=93) 2024-12-09T05:47:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_1073741827_1017 (size=93) 2024-12-09T05:47:49,565 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7f75e6015732,33997,1733723266172 already deleted, retry=false 2024-12-09T05:47:49,565 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7f75e6015732,33997,1733723266172 expired; onlineServers=1 2024-12-09T05:47:49,567 DEBUG [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/oldWALs 2024-12-09T05:47:49,568 INFO [RS:2;7f75e6015732:37663 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7f75e6015732%2C37663%2C1733723266299:(num 1733723267471) 2024-12-09T05:47:49,568 DEBUG [RS:2;7f75e6015732:37663 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:49,568 INFO [RS:2;7f75e6015732:37663 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T05:47:49,568 INFO [RS:2;7f75e6015732:37663 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T05:47:49,568 INFO [RS:2;7f75e6015732:37663 {}] hbase.ChoreService(370): Chore service for: regionserver/7f75e6015732:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T05:47:49,568 INFO [RS:2;7f75e6015732:37663 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T05:47:49,568 INFO [regionserver/7f75e6015732:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T05:47:49,568 INFO [RS:2;7f75e6015732:37663 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37663 2024-12-09T05:47:49,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:47:49,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7f75e6015732,37663,1733723266299 2024-12-09T05:47:49,569 INFO [RS:2;7f75e6015732:37663 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T05:47:49,570 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7f75e6015732,37663,1733723266299] 2024-12-09T05:47:49,571 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7f75e6015732,37663,1733723266299 already deleted, retry=false 2024-12-09T05:47:49,571 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7f75e6015732,37663,1733723266299 expired; onlineServers=0 2024-12-09T05:47:49,571 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7f75e6015732,33537,1733723265612' ***** 2024-12-09T05:47:49,571 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T05:47:49,572 INFO [M:0;7f75e6015732:33537 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T05:47:49,572 INFO [M:0;7f75e6015732:33537 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T05:47:49,572 DEBUG [M:0;7f75e6015732:33537 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T05:47:49,572 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T05:47:49,572 DEBUG [master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.small.0-1733723267165 {}] cleaner.HFileCleaner(306): Exit Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.small.0-1733723267165,5,FailOnTimeoutGroup] 2024-12-09T05:47:49,572 DEBUG [M:0;7f75e6015732:33537 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T05:47:49,572 DEBUG [master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.large.0-1733723267162 {}] cleaner.HFileCleaner(306): Exit Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.large.0-1733723267162,5,FailOnTimeoutGroup] 2024-12-09T05:47:49,572 INFO [M:0;7f75e6015732:33537 {}] hbase.ChoreService(370): Chore service for: master/7f75e6015732:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T05:47:49,572 INFO [M:0;7f75e6015732:33537 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T05:47:49,572 DEBUG [M:0;7f75e6015732:33537 {}] master.HMaster(1795): Stopping service threads 2024-12-09T05:47:49,572 INFO [M:0;7f75e6015732:33537 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T05:47:49,573 INFO [M:0;7f75e6015732:33537 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T05:47:49,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T05:47:49,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:49,573 INFO [M:0;7f75e6015732:33537 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T05:47:49,573 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T05:47:49,574 DEBUG [M:0;7f75e6015732:33537 {}] zookeeper.ZKUtil(347): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T05:47:49,574 WARN [M:0;7f75e6015732:33537 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T05:47:49,575 INFO [M:0;7f75e6015732:33537 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/.lastflushedseqids 2024-12-09T05:47:49,583 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,583 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T05:47:49,585 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:47652 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47652 dst: /127.0.0.1:43547 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-09T05:47:49,590 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T05:47:49,590 INFO [M:0;7f75e6015732:33537 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T05:47:49,590 INFO [M:0;7f75e6015732:33537 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T05:47:49,590 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:47:49,590 INFO [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:47:49,590 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:47:49,591 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:47:49,591 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T05:47:49,591 INFO [M:0;7f75e6015732:33537 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-09T05:47:49,609 DEBUG [M:0;7f75e6015732:33537 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1488819e48a647b6bf4db9445b0130a1 is 82, key is hbase:meta,,1/info:regioninfo/1733723268033/Put/seqid=0 2024-12-09T05:47:49,611 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,611 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,614 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:51588 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:42767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51588 dst: /127.0.0.1:42767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-09T05:47:49,619 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T05:47:49,619 INFO [M:0;7f75e6015732:33537 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1488819e48a647b6bf4db9445b0130a1 2024-12-09T05:47:49,644 DEBUG [M:0;7f75e6015732:33537 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12b33a054f1f47dd9f2f7640101b0355 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733723268790/Put/seqid=0 2024-12-09T05:47:49,645 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,646 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:36534 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:45353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36534 dst: /127.0.0.1:45353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-09T05:47:49,654 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T05:47:49,655 INFO [M:0;7f75e6015732:33537 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12b33a054f1f47dd9f2f7640101b0355 2024-12-09T05:47:49,664 INFO [RS:0;7f75e6015732:33997 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T05:47:49,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,664 INFO [RS:0;7f75e6015732:33997 {}] regionserver.HRegionServer(1031): Exiting; stopping=7f75e6015732,33997,1733723266172; zookeeper connection closed. 2024-12-09T05:47:49,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33997-0x100bd84f3d20001, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,665 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c85f323 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c85f323 2024-12-09T05:47:49,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,670 INFO [RS:2;7f75e6015732:37663 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T05:47:49,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37663-0x100bd84f3d20003, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,671 INFO [RS:2;7f75e6015732:37663 {}] regionserver.HRegionServer(1031): Exiting; stopping=7f75e6015732,37663,1733723266299; zookeeper connection closed. 2024-12-09T05:47:49,671 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fad7fdd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fad7fdd 2024-12-09T05:47:49,671 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T05:47:49,680 DEBUG [M:0;7f75e6015732:33537 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2dbf8ae8a8094588bf2e9c020327123b is 69, key is 7f75e6015732,33997,1733723266172/rs:state/1733723267237/Put/seqid=0 2024-12-09T05:47:49,682 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T05:47:49,682 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T05:47:49,684 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1836317412_22 at /127.0.0.1:36546 [Receiving block BP-1006126246-172.17.0.2-1733723262988:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:45353:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36546 dst: /127.0.0.1:45353 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T05:47:49,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-09T05:47:49,689 WARN [M:0;7f75e6015732:33537 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T05:47:49,689 INFO [M:0;7f75e6015732:33537 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2dbf8ae8a8094588bf2e9c020327123b 2024-12-09T05:47:49,697 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1488819e48a647b6bf4db9445b0130a1 as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1488819e48a647b6bf4db9445b0130a1 2024-12-09T05:47:49,705 INFO [M:0;7f75e6015732:33537 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1488819e48a647b6bf4db9445b0130a1, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T05:47:49,707 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12b33a054f1f47dd9f2f7640101b0355 as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12b33a054f1f47dd9f2f7640101b0355 2024-12-09T05:47:49,715 INFO [M:0;7f75e6015732:33537 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12b33a054f1f47dd9f2f7640101b0355, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T05:47:49,716 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2dbf8ae8a8094588bf2e9c020327123b as hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2dbf8ae8a8094588bf2e9c020327123b 2024-12-09T05:47:49,723 INFO [M:0;7f75e6015732:33537 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2dbf8ae8a8094588bf2e9c020327123b, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T05:47:49,725 INFO [M:0;7f75e6015732:33537 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=72, compaction requested=false 2024-12-09T05:47:49,726 INFO [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
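The "Committing .../.tmp/... as .../..." DEBUG lines above (for the meta region earlier and for the master's local store here) describe the two-step flush commit: the store file is first written under the region's .tmp directory and only renamed into the column-family directory once it is complete, so readers never open a half-written file. Below is a minimal sketch of that write-then-rename pattern with the plain FileSystem API; the paths and payload are illustrative, not taken from this run.

    // Editorial sketch only: write to a temporary location, then publish the
    // finished file with a single rename.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp  = new Path("/data/example/region/.tmp/flushfile"); // hypothetical
        Path dest = new Path("/data/example/region/cf/flushfile");   // hypothetical
        try (FSDataOutputStream out = fs.create(tmp)) {
          out.writeBytes("flushed cells would go here");             // placeholder payload
        }
        // Only after the file is fully written is it moved into the store
        // directory, so readers never observe a partial store file.
        if (!fs.rename(tmp, dest)) {
          throw new java.io.IOException("commit rename failed: " + tmp + " -> " + dest);
        }
      }
    }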
2024-12-09T05:47:49,726 DEBUG [M:0;7f75e6015732:33537 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733723269590Disabling compacts and flushes for region at 1733723269590Disabling writes for close at 1733723269591 (+1 ms)Obtaining lock to block concurrent updates at 1733723269591Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733723269591Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733723269592 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733723269592Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733723269593 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733723269609 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733723269609Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733723269627 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733723269643 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733723269643Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733723269662 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733723269680 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733723269680Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b0517e1: reopening flushed file at 1733723269696 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ee75f3f: reopening flushed file at 1733723269705 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39fdf615: reopening flushed file at 1733723269715 (+10 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=72, compaction requested=false at 1733723269725 (+10 ms)Writing region close event to WAL at 1733723269726 (+1 ms)Closed at 1733723269726 2024-12-09T05:47:49,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42767 is added to blk_1073741825_1011 (size=32683) 2024-12-09T05:47:49,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45353 is added to blk_1073741825_1011 (size=32683) 2024-12-09T05:47:49,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43547 is added to blk_1073741825_1011 (size=32683) 2024-12-09T05:47:49,730 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T05:47:49,730 INFO [M:0;7f75e6015732:33537 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T05:47:49,730 INFO [M:0;7f75e6015732:33537 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33537 2024-12-09T05:47:49,730 INFO [M:0;7f75e6015732:33537 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T05:47:49,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,832 INFO [M:0;7f75e6015732:33537 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T05:47:49,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33537-0x100bd84f3d20000, quorum=127.0.0.1:49886, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T05:47:49,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:47:49,843 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:47:49,844 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:47:49,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:47:49,844 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,STOPPED} 2024-12-09T05:47:49,847 WARN [BP-1006126246-172.17.0.2-1733723262988 heartbeating to localhost/127.0.0.1:43973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:47:49,847 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:47:49,847 WARN [BP-1006126246-172.17.0.2-1733723262988 heartbeating to localhost/127.0.0.1:43973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1006126246-172.17.0.2-1733723262988 (Datanode Uuid 72f28de5-dd1c-47d0-90a1-baeea8cd3857) service to localhost/127.0.0.1:43973 2024-12-09T05:47:49,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:47:49,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data5/current/BP-1006126246-172.17.0.2-1733723262988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:47:49,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data6/current/BP-1006126246-172.17.0.2-1733723262988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:47:49,848 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:47:49,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:47:49,850 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:47:49,850 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:47:49,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:47:49,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,STOPPED} 2024-12-09T05:47:49,852 WARN [BP-1006126246-172.17.0.2-1733723262988 heartbeating to localhost/127.0.0.1:43973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:47:49,852 WARN [BP-1006126246-172.17.0.2-1733723262988 heartbeating to localhost/127.0.0.1:43973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1006126246-172.17.0.2-1733723262988 (Datanode Uuid b33d9565-76f7-4382-821e-f7b88b509044) service to localhost/127.0.0.1:43973 2024-12-09T05:47:49,852 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:47:49,852 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:47:49,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data3/current/BP-1006126246-172.17.0.2-1733723262988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:47:49,852 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data4/current/BP-1006126246-172.17.0.2-1733723262988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:47:49,853 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:47:49,854 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:47:49,855 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:47:49,855 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:47:49,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:47:49,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,STOPPED} 2024-12-09T05:47:49,856 WARN [BP-1006126246-172.17.0.2-1733723262988 heartbeating to localhost/127.0.0.1:43973 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T05:47:49,856 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T05:47:49,856 WARN [BP-1006126246-172.17.0.2-1733723262988 heartbeating to localhost/127.0.0.1:43973 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1006126246-172.17.0.2-1733723262988 (Datanode Uuid 0012ce7d-5b78-402f-a5b7-4b40c54c9d6e) service to localhost/127.0.0.1:43973 2024-12-09T05:47:49,856 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T05:47:49,857 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data1/current/BP-1006126246-172.17.0.2-1733723262988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:47:49,857 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/cluster_124ff123-fa93-e8fc-8fb8-7be49ba34eed/data/data2/current/BP-1006126246-172.17.0.2-1733723262988 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T05:47:49,857 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T05:47:49,867 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:47:49,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T05:47:49,868 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T05:47:49,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T05:47:49,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir/,STOPPED} 2024-12-09T05:47:49,876 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T05:47:49,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T05:47:49,908 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=91 (was 160), OpenFileDescriptor=439 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=380 (was 378) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8665 (was 8969) 2024-12-09T05:47:49,913 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=91, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=380, ProcessCount=11, AvailableMemoryMB=8665 2024-12-09T05:47:49,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T05:47:49,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.log.dir so I do NOT create it in target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab 2024-12-09T05:47:49,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db2c8763-73e0-a888-263b-9c20a85925c3/hadoop.tmp.dir so I do NOT create it in target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab 2024-12-09T05:47:49,913 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006, deleteOnExit=true 2024-12-09T05:47:49,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/test.cache.data in system properties and HBase conf 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir in system properties and HBase conf 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T05:47:49,914 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T05:47:49,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/nfs.dump.dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/java.io.tmpdir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T05:47:49,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T05:47:49,977 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:47:49,982 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:47:49,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:47:49,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:47:49,983 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:47:49,984 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:47:49,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@715f09c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:47:49,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71b7cabb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:47:50,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@46039787{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/java.io.tmpdir/jetty-localhost-41385-hadoop-hdfs-3_4_1-tests_jar-_-any-11190794304028989091/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T05:47:50,075 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7326bb42{HTTP/1.1, (http/1.1)}{localhost:41385} 2024-12-09T05:47:50,075 INFO [Time-limited test {}] server.Server(415): Started @8770ms 2024-12-09T05:47:50,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:47:50,144 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:47:50,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:47:50,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:47:50,145 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:47:50,149 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2591ff9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:47:50,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e494f88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:47:50,240 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b7198f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/java.io.tmpdir/jetty-localhost-43439-hadoop-hdfs-3_4_1-tests_jar-_-any-9226517653214979390/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:47:50,241 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@69996946{HTTP/1.1, (http/1.1)}{localhost:43439} 2024-12-09T05:47:50,241 INFO [Time-limited test {}] server.Server(415): Started @8936ms 2024-12-09T05:47:50,242 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:47:50,273 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:47:50,275 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:47:50,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:47:50,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:47:50,276 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T05:47:50,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fee469f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:47:50,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ae51624{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:47:50,299 WARN [Thread-516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data1/current/BP-514775923-172.17.0.2-1733723269939/current, will proceed with Du for space computation calculation, 2024-12-09T05:47:50,299 WARN [Thread-517 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data2/current/BP-514775923-172.17.0.2-1733723269939/current, will proceed with Du for space computation calculation, 2024-12-09T05:47:50,313 WARN [Thread-495 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:47:50,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf480f29d519e0d9f with lease ID 0x25aef07a69ccad4c: Processing first storage report for DS-8a1b6c7d-e61c-4368-b55b-4587ed386ae9 from datanode DatanodeRegistration(127.0.0.1:38311, datanodeUuid=cdf4dd8b-003d-4f33-84b6-04f786215bcc, infoPort=40309, infoSecurePort=0, ipcPort=46035, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939) 2024-12-09T05:47:50,316 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf480f29d519e0d9f with lease ID 0x25aef07a69ccad4c: from storage DS-8a1b6c7d-e61c-4368-b55b-4587ed386ae9 node DatanodeRegistration(127.0.0.1:38311, datanodeUuid=cdf4dd8b-003d-4f33-84b6-04f786215bcc, infoPort=40309, infoSecurePort=0, ipcPort=46035, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:47:50,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf480f29d519e0d9f with lease ID 0x25aef07a69ccad4c: Processing first storage report for DS-87ae31f0-cf0a-4572-a36f-ec85888ba35e from datanode DatanodeRegistration(127.0.0.1:38311, datanodeUuid=cdf4dd8b-003d-4f33-84b6-04f786215bcc, infoPort=40309, infoSecurePort=0, ipcPort=46035, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939) 2024-12-09T05:47:50,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf480f29d519e0d9f with lease ID 0x25aef07a69ccad4c: from storage DS-87ae31f0-cf0a-4572-a36f-ec85888ba35e node DatanodeRegistration(127.0.0.1:38311, datanodeUuid=cdf4dd8b-003d-4f33-84b6-04f786215bcc, infoPort=40309, infoSecurePort=0, ipcPort=46035, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:47:50,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f02dcba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/java.io.tmpdir/jetty-localhost-35269-hadoop-hdfs-3_4_1-tests_jar-_-any-2763308650596079983/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:47:50,371 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15fff4df{HTTP/1.1, (http/1.1)}{localhost:35269} 2024-12-09T05:47:50,372 INFO [Time-limited test {}] server.Server(415): Started @9067ms 2024-12-09T05:47:50,373 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T05:47:50,398 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T05:47:50,401 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T05:47:50,401 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T05:47:50,401 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T05:47:50,401 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T05:47:50,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19f40ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,AVAILABLE} 2024-12-09T05:47:50,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ec581d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T05:47:50,429 WARN [Thread-551 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data3/current/BP-514775923-172.17.0.2-1733723269939/current, will proceed with Du for space computation calculation, 2024-12-09T05:47:50,430 WARN [Thread-552 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data4/current/BP-514775923-172.17.0.2-1733723269939/current, will proceed with Du for space computation calculation, 2024-12-09T05:47:50,443 WARN [Thread-531 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T05:47:50,446 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31cdfc6e7788d48f with lease ID 0x25aef07a69ccad4d: Processing first storage report for DS-62d70420-c8eb-4cd9-b18a-74149a7089e9 from datanode DatanodeRegistration(127.0.0.1:38537, datanodeUuid=c99d5ef1-5bfa-4d0c-8f19-e83d2dfec4f4, infoPort=41931, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939) 2024-12-09T05:47:50,446 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31cdfc6e7788d48f with lease ID 0x25aef07a69ccad4d: from storage DS-62d70420-c8eb-4cd9-b18a-74149a7089e9 node DatanodeRegistration(127.0.0.1:38537, datanodeUuid=c99d5ef1-5bfa-4d0c-8f19-e83d2dfec4f4, infoPort=41931, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:47:50,446 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31cdfc6e7788d48f with lease ID 0x25aef07a69ccad4d: Processing first storage report for DS-7d673bf3-ec58-4591-8f79-729914bb2fe3 from datanode DatanodeRegistration(127.0.0.1:38537, datanodeUuid=c99d5ef1-5bfa-4d0c-8f19-e83d2dfec4f4, infoPort=41931, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939) 2024-12-09T05:47:50,447 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31cdfc6e7788d48f with lease ID 0x25aef07a69ccad4d: from storage DS-7d673bf3-ec58-4591-8f79-729914bb2fe3 node DatanodeRegistration(127.0.0.1:38537, datanodeUuid=c99d5ef1-5bfa-4d0c-8f19-e83d2dfec4f4, infoPort=41931, infoSecurePort=0, ipcPort=38839, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:47:50,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61cce6fc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/java.io.tmpdir/jetty-localhost-39221-hadoop-hdfs-3_4_1-tests_jar-_-any-4627954850847613840/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T05:47:50,495 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64e2e59d{HTTP/1.1, (http/1.1)}{localhost:39221} 2024-12-09T05:47:50,495 INFO [Time-limited test {}] server.Server(415): Started @9190ms 2024-12-09T05:47:50,496 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
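The second parameterized run ([1]) starts a fresh minicluster with the options echoed above: StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1}. A minimal sketch of how a test requests that topology, assuming StartMiniClusterOption exposes builder setters matching the field names in its logged toString and that startMiniCluster(StartMiniClusterOption) is available on HBaseTestingUtil as it is on HBaseTestingUtility; treat the exact setter names as an assumption.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterStart {
  static HBaseTestingUtil startThreeNodeCluster() throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the logged options
        .numRegionServers(3)  // numRegionServers=3
        .numDataNodes(3)      // numDataNodes=3
        .numZkServers(1)      // numZkServers=1
        .build();
    // Brings up MiniDFS, MiniZK and the HBase master/region servers, producing
    // Jetty/DataNode startup entries like the ones above.
    util.startMiniCluster(option);
    return util;
  }
}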
2024-12-09T05:47:50,553 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data5/current/BP-514775923-172.17.0.2-1733723269939/current, will proceed with Du for space computation calculation, 2024-12-09T05:47:50,553 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data6/current/BP-514775923-172.17.0.2-1733723269939/current, will proceed with Du for space computation calculation, 2024-12-09T05:47:50,571 WARN [Thread-566 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T05:47:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x561541a99d33b0c9 with lease ID 0x25aef07a69ccad4e: Processing first storage report for DS-d274d6b5-5c6c-4856-893e-732c3413af3a from datanode DatanodeRegistration(127.0.0.1:36435, datanodeUuid=46c9fd07-72f0-463d-a850-fa74c75782b8, infoPort=41867, infoSecurePort=0, ipcPort=46829, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939) 2024-12-09T05:47:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x561541a99d33b0c9 with lease ID 0x25aef07a69ccad4e: from storage DS-d274d6b5-5c6c-4856-893e-732c3413af3a node DatanodeRegistration(127.0.0.1:36435, datanodeUuid=46c9fd07-72f0-463d-a850-fa74c75782b8, infoPort=41867, infoSecurePort=0, ipcPort=46829, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T05:47:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x561541a99d33b0c9 with lease ID 0x25aef07a69ccad4e: Processing first storage report for DS-59f54725-e1cc-4469-a28b-d5f8307091a7 from datanode DatanodeRegistration(127.0.0.1:36435, datanodeUuid=46c9fd07-72f0-463d-a850-fa74c75782b8, infoPort=41867, infoSecurePort=0, ipcPort=46829, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939) 2024-12-09T05:47:50,574 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x561541a99d33b0c9 with lease ID 0x25aef07a69ccad4e: from storage DS-59f54725-e1cc-4469-a28b-d5f8307091a7 node DatanodeRegistration(127.0.0.1:36435, datanodeUuid=46c9fd07-72f0-463d-a850-fa74c75782b8, infoPort=41867, infoSecurePort=0, ipcPort=46829, storageInfo=lv=-57;cid=testClusterID;nsid=705875499;c=1733723269939), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T05:47:50,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab 2024-12-09T05:47:50,622 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/zookeeper_0, clientPort=61000, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T05:47:50,623 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61000 2024-12-09T05:47:50,623 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:47:50,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:47:50,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741825_1001 (size=7) 2024-12-09T05:47:50,638 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348 with version=8 2024-12-09T05:47:50,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43973/user/jenkins/test-data/c2a7d96b-c74b-bbe5-9cad-fea40c69ec05/hbase-staging 2024-12-09T05:47:50,640 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7f75e6015732:0 server-side Connection retries=45 2024-12-09T05:47:50,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,640 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,640 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:47:50,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:47:50,641 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T05:47:50,641 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:47:50,641 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45055 2024-12-09T05:47:50,642 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45055 connecting to ZooKeeper ensemble=127.0.0.1:61000 2024-12-09T05:47:50,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450550x0, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:47:50,646 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45055-0x100bd850a4c0000 connected 2024-12-09T05:47:50,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,661 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:50,661 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348, hbase.cluster.distributed=false 2024-12-09T05:47:50,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:47:50,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45055 2024-12-09T05:47:50,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45055 2024-12-09T05:47:50,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45055 2024-12-09T05:47:50,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45055 2024-12-09T05:47:50,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45055 2024-12-09T05:47:50,682 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7f75e6015732:0 server-side Connection retries=45 2024-12-09T05:47:50,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,682 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:47:50,682 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,682 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:47:50,682 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:47:50,683 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:47:50,683 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34875 2024-12-09T05:47:50,684 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34875 connecting to ZooKeeper ensemble=127.0.0.1:61000 2024-12-09T05:47:50,685 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348750x0, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:47:50,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:50,690 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34875-0x100bd850a4c0001 connected 2024-12-09T05:47:50,691 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:47:50,691 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:47:50,692 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:47:50,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:47:50,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34875 2024-12-09T05:47:50,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34875 2024-12-09T05:47:50,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34875 2024-12-09T05:47:50,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34875 2024-12-09T05:47:50,694 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34875 2024-12-09T05:47:50,708 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7f75e6015732:0 server-side Connection retries=45 2024-12-09T05:47:50,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,708 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:47:50,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:47:50,708 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:47:50,709 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:47:50,709 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41291 2024-12-09T05:47:50,710 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41291 connecting to ZooKeeper ensemble=127.0.0.1:61000 2024-12-09T05:47:50,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,712 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412910x0, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:47:50,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412910x0, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:50,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41291-0x100bd850a4c0002 connected 2024-12-09T05:47:50,716 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:47:50,717 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:47:50,718 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
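Each region server above instantiates the same call-queue layout: handlerCount=3 per executor, with the priority queue split into 1 write and 2 read handlers. The sketch below sets the standard HBase configuration keys that shape these executors; the property names are real HBase settings, but the mapping from these values to the exact counts logged here is an approximation for illustration, not a claim about the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class RpcHandlerConfig {
  static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Total RPC handler threads per server; the logged executors show handlerCount=3.
    conf.setInt("hbase.regionserver.handler.count", 3);
    // How many call queues to create relative to the handler count (0 => a single queue).
    conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.0f);
    // Share of the priority queues dedicated to reads vs. writes
    // (the log shows writeHandlers=1, readHandlers=2).
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.66f);
    return conf;
  }
}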
2024-12-09T05:47:50,719 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:47:50,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41291 2024-12-09T05:47:50,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41291 2024-12-09T05:47:50,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41291 2024-12-09T05:47:50,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41291 2024-12-09T05:47:50,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41291 2024-12-09T05:47:50,733 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7f75e6015732:0 server-side Connection retries=45 2024-12-09T05:47:50,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,734 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T05:47:50,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T05:47:50,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T05:47:50,734 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T05:47:50,734 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T05:47:50,734 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38343 2024-12-09T05:47:50,736 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38343 connecting to ZooKeeper ensemble=127.0.0.1:61000 2024-12-09T05:47:50,736 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,738 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:383430x0, quorum=127.0.0.1:61000, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T05:47:50,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:50,742 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38343-0x100bd850a4c0003 connected 2024-12-09T05:47:50,742 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T05:47:50,743 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T05:47:50,743 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T05:47:50,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T05:47:50,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38343 2024-12-09T05:47:50,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38343 2024-12-09T05:47:50,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38343 2024-12-09T05:47:50,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38343 2024-12-09T05:47:50,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38343 2024-12-09T05:47:50,757 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7f75e6015732:45055 2024-12-09T05:47:50,757 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,759 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45055-0x100bd850a4c0000, 
quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:47:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:47:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T05:47:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,761 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T05:47:50,762 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7f75e6015732,45055,1733723270640 from backup master directory 2024-12-09T05:47:50,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, 
quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T05:47:50,763 WARN [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:47:50,763 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,769 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/hbase.id] with ID: 5b859e73-7e41-4fc4-9086-9026d6f1a6b3 2024-12-09T05:47:50,769 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/.tmp/hbase.id 2024-12-09T05:47:50,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:47:50,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:47:50,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741826_1002 (size=42) 2024-12-09T05:47:50,778 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/.tmp/hbase.id]:[hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/hbase.id] 2024-12-09T05:47:50,794 INFO [master/7f75e6015732:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T05:47:50,794 INFO [master/7f75e6015732:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T05:47:50,796 INFO [master/7f75e6015732:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
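The becomeActiveMaster entries above show the cluster ID being written to a .tmp path and then moved onto hbase.id, the usual write-then-rename pattern for publishing a small file on HDFS without readers ever seeing a partial write. A minimal sketch of that pattern with the plain Hadoop FileSystem API, as an assumption-laden illustration only: HBase's own FSUtils adds retries and error handling not shown here, and the class and method names below are invented for the example.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdPublish {
  /** Write content to tmpFile, then rename it over finalFile so readers never see a partial file. */
  static void publish(Configuration conf, Path tmpFile, Path finalFile, String clusterId)
      throws Exception {
    FileSystem fs = finalFile.getFileSystem(conf);
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("rename " + tmpFile + " -> " + finalFile + " failed");
    }
  }
}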
2024-12-09T05:47:50,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:47:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:47:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741827_1003 (size=196) 2024-12-09T05:47:50,809 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:47:50,810 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T05:47:50,810 INFO [master/7f75e6015732:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:47:50,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is 
added to blk_1073741828_1004 (size=1189) 2024-12-09T05:47:50,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:47:50,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741828_1004 (size=1189) 2024-12-09T05:47:50,824 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store 2024-12-09T05:47:50,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:47:50,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:47:50,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741829_1005 (size=34) 2024-12-09T05:47:50,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:50,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T05:47:50,838 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:47:50,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
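The MasterRegion(370) and HRegion(7590) entries dump the full 'master:store' table descriptor (families info, proc, rs, state). A hedged sketch of how an equivalent descriptor could be assembled with the public HBase client builders; this is only an illustration of the logged attributes, not the MasterRegion-internal code, and only the 'info' and 'proc' families are shown:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info': VERSIONS=3, IN_MEMORY, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc': VERSIONS=1, ROW bloom, default 64 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build())
            .build();
    }
}
```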
2024-12-09T05:47:50,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T05:47:50,838 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:47:50,838 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T05:47:50,839 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733723270838Disabling compacts and flushes for region at 1733723270838Disabling writes for close at 1733723270838Writing region close event to WAL at 1733723270838Closed at 1733723270838 2024-12-09T05:47:50,840 WARN [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/.initializing 2024-12-09T05:47:50,840 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/WALs/7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,843 INFO [master/7f75e6015732:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C45055%2C1733723270640, suffix=, logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/WALs/7f75e6015732,45055,1733723270640, archiveDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/oldWALs, maxLogs=10 2024-12-09T05:47:50,844 INFO [master/7f75e6015732:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7f75e6015732%2C45055%2C1733723270640.1733723270844 2024-12-09T05:47:50,853 INFO [master/7f75e6015732:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/WALs/7f75e6015732,45055,1733723270640/7f75e6015732%2C45055%2C1733723270640.1733723270844 2024-12-09T05:47:50,857 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41931:41931),(127.0.0.1/127.0.0.1:40309:40309),(127.0.0.1/127.0.0.1:41867:41867)] 2024-12-09T05:47:50,858 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:47:50,858 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:50,858 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,858 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T05:47:50,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:50,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T05:47:50,864 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:50,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T05:47:50,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:50,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T05:47:50,871 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:50,872 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,872 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,873 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,875 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,875 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,875 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:47:50,877 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T05:47:50,879 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:47:50,880 INFO [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59816972, jitterRate=-0.10865765810012817}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:47:50,881 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733723270858Initializing all the Stores at 1733723270860 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723270860Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723270860Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723270860Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723270860Cleaning up temporary data from old regions at 1733723270875 (+15 ms)Region opened successfully at 1733723270881 (+6 ms) 2024-12-09T05:47:50,881 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T05:47:50,886 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b53d609, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:50,887 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T05:47:50,887 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T05:47:50,887 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T05:47:50,887 INFO [master/7f75e6015732:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T05:47:50,888 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T05:47:50,888 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T05:47:50,888 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T05:47:50,891 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
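The HRegion(1006) "Region open journal" entry is one long line of timestamped steps with millisecond deltas ("(+2 ms)", "(+15 ms)"). A small, generic sketch of that journaling pattern in plain Java; the class and method names are mine, not HBase's:

```java
import java.util.ArrayList;
import java.util.List;

/** Record a timestamp per step, then render the steps with deltas between them. */
public class StepJournalSketch {
    private record Step(String what, long at) {}
    private final List<Step> steps = new ArrayList<>();

    public void mark(String what) {
        steps.add(new Step(what, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < steps.size(); i++) {
            Step s = steps.get(i);
            sb.append(s.what()).append(" at ").append(s.at());
            if (i > 0) {
                sb.append(" (+").append(s.at() - steps.get(i - 1).at()).append(" ms)");
            }
            if (i < steps.size() - 1) {
                sb.append("; ");
            }
        }
        return sb.toString();
    }
}
```

Keeping the whole journal on one line, as the log does, makes a single region open or close greppable by region name.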
2024-12-09T05:47:50,891 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T05:47:50,892 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T05:47:50,893 INFO [master/7f75e6015732:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T05:47:50,893 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T05:47:50,894 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T05:47:50,894 INFO [master/7f75e6015732:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T05:47:50,898 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T05:47:50,899 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T05:47:50,900 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T05:47:50,900 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T05:47:50,903 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T05:47:50,903 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,907 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7f75e6015732,45055,1733723270640, sessionid=0x100bd850a4c0000, setting cluster-up flag (Was=false) 2024-12-09T05:47:50,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,912 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T05:47:50,912 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:50,917 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T05:47:50,918 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7f75e6015732,45055,1733723270640 2024-12-09T05:47:50,920 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T05:47:50,922 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:50,923 INFO [master/7f75e6015732:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T05:47:50,923 INFO [master/7f75e6015732:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
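The ZKUtil(444)/RecoverableZooKeeper(212) pairs above, for /hbase/balancer, /hbase/normalizer, the split/merge switches and /hbase/snapshot-cleanup, all follow the same pattern: read the znode if it exists, and treat a miss as "use the default" rather than an error. A minimal sketch with the plain ZooKeeper client (helper name is illustrative):

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class OptionalZNodeSketch {
    /** Returns the znode's data, or null when the node does not exist
     *  ("not necessarily an error", as the log puts it). */
    public static byte[] getDataOrNull(ZooKeeper zk, String znode) throws Exception {
        try {
            return zk.getData(znode, false, new Stat());
        } catch (KeeperException.NoNodeException e) {
            return null;   // node absent: caller falls back to its configured default
        }
    }
}
```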
2024-12-09T05:47:50,923 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7f75e6015732,45055,1733723270640 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7f75e6015732:0, corePoolSize=5, maxPoolSize=5 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7f75e6015732:0, corePoolSize=10, maxPoolSize=10 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:50,925 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:50,930 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733723300930 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T05:47:50,931 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T05:47:50,932 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T05:47:50,932 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T05:47:50,932 INFO [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T05:47:50,932 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:50,932 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T05:47:50,932 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.large.0-1733723270932,5,FailOnTimeoutGroup] 2024-12-09T05:47:50,933 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.small.0-1733723270933,5,FailOnTimeoutGroup] 2024-12-09T05:47:50,933 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:50,933 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T05:47:50,933 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:50,933 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
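The ChoreService(168) lines register periodic chores such as LogsCleaner (period=600000 ms), HFileCleaner, ReplicationBarrierCleaner and SnapshotCleaner. A rough stand-in for that scheduling idea using only java.util.concurrent; HBase's own ChoreService/ScheduledChore classes are not reproduced here, and the cleaner body is deliberately left as a placeholder:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();

        Runnable logsCleaner = () -> {
            // A real LogsCleaner scans the oldWALs directory and deletes files
            // whose TTL has expired; omitted in this sketch.
        };

        // 600000 ms matches the LogsCleaner period reported above.
        pool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
    }
}
```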
2024-12-09T05:47:50,934 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,934 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T05:47:50,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741831_1007 (size=1321) 2024-12-09T05:47:50,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741831_1007 (size=1321) 2024-12-09T05:47:50,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741831_1007 (size=1321) 2024-12-09T05:47:50,948 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(746): ClusterId : 5b859e73-7e41-4fc4-9086-9026d6f1a6b3 2024-12-09T05:47:50,949 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(746): ClusterId : 5b859e73-7e41-4fc4-9086-9026d6f1a6b3 2024-12-09T05:47:50,949 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:47:50,949 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:47:50,949 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(746): ClusterId : 5b859e73-7e41-4fc4-9086-9026d6f1a6b3 2024-12-09T05:47:50,949 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T05:47:50,949 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T05:47:50,950 INFO [PEWorker-1 {}] regionserver.HRegion(7572): 
creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348 2024-12-09T05:47:50,951 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:47:50,951 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:47:50,951 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:47:50,951 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:47:50,951 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T05:47:50,951 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T05:47:50,953 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:47:50,953 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:47:50,953 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T05:47:50,953 DEBUG [RS:1;7f75e6015732:41291 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@704c3a07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:50,953 DEBUG [RS:2;7f75e6015732:38343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73a5b8e7, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:50,953 DEBUG [RS:0;7f75e6015732:34875 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d2962ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7f75e6015732/172.17.0.2:0 2024-12-09T05:47:50,965 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;7f75e6015732:38343 2024-12-09T05:47:50,965 DEBUG [RS:0;7f75e6015732:34875 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7f75e6015732:34875 2024-12-09T05:47:50,966 INFO [RS:0;7f75e6015732:34875 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T05:47:50,966 INFO [RS:2;7f75e6015732:38343 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T05:47:50,966 INFO [RS:0;7f75e6015732:34875 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T05:47:50,966 INFO [RS:2;7f75e6015732:38343 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T05:47:50,966 DEBUG [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T05:47:50,966 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T05:47:50,967 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(2659): reportForDuty to master=7f75e6015732,45055,1733723270640 with port=38343, startcode=1733723270733 2024-12-09T05:47:50,967 DEBUG [RS:2;7f75e6015732:38343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:47:50,968 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(2659): reportForDuty to master=7f75e6015732,45055,1733723270640 with port=34875, startcode=1733723270682 2024-12-09T05:47:50,969 DEBUG [RS:0;7f75e6015732:34875 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:47:50,972 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7f75e6015732:41291 2024-12-09T05:47:50,972 INFO [RS:1;7f75e6015732:41291 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T05:47:50,973 INFO [RS:1;7f75e6015732:41291 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T05:47:50,973 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(832): About to register with Master. 
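Each region server logs ShutdownHook(81) "Installed shutdown hook thread" before registering with the master. The mechanism underneath is a JVM shutdown hook; a minimal, hypothetical sketch in which the AutoCloseable target is a stand-in for the server, not the real HRegionServer API:

```java
public class ShutdownHookSketch {
    /** Install a hook that closes server resources (WALs, regions, ZK session)
     *  when the JVM exits, so an orderly stop does not leave them dangling. */
    public static void install(AutoCloseable regionServer, String name) {
        Thread hook = new Thread(() -> {
            try {
                regionServer.close();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }, "Shutdownhook:" + name);
        Runtime.getRuntime().addShutdownHook(hook);
    }
}
```

A kill -9, by contrast, skips the hook entirely, which is why the HBASE_ZNODE_FILE warnings above matter: without that file the start scripts cannot clear stale znodes after a hard crash, lengthening MTTR.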
2024-12-09T05:47:50,974 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(2659): reportForDuty to master=7f75e6015732,45055,1733723270640 with port=41291, startcode=1733723270708 2024-12-09T05:47:50,974 DEBUG [RS:1;7f75e6015732:41291 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T05:47:50,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:47:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:47:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741832_1008 (size=32) 2024-12-09T05:47:50,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:50,977 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60959, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:47:50,977 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36425, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:47:50,978 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57527, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T05:47:50,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45055 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7f75e6015732,38343,1733723270733 2024-12-09T05:47:50,978 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45055 {}] master.ServerManager(517): Registering regionserver=7f75e6015732,38343,1733723270733 2024-12-09T05:47:50,980 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45055 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7f75e6015732,34875,1733723270682 2024-12-09T05:47:50,980 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45055 {}] master.ServerManager(517): Registering regionserver=7f75e6015732,34875,1733723270682 2024-12-09T05:47:50,980 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348 2024-12-09T05:47:50,980 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41783 2024-12-09T05:47:50,981 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T05:47:50,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:47:50,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, 
quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:47:50,982 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45055 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7f75e6015732,41291,1733723270708 2024-12-09T05:47:50,983 DEBUG [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348 2024-12-09T05:47:50,982 DEBUG [RS:2;7f75e6015732:38343 {}] zookeeper.ZKUtil(111): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7f75e6015732,38343,1733723270733 2024-12-09T05:47:50,983 DEBUG [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41783 2024-12-09T05:47:50,983 WARN [RS:2;7f75e6015732:38343 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:47:50,983 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45055 {}] master.ServerManager(517): Registering regionserver=7f75e6015732,41291,1733723270708 2024-12-09T05:47:50,983 DEBUG [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T05:47:50,983 INFO [RS:2;7f75e6015732:38343 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:47:50,983 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,38343,1733723270733 2024-12-09T05:47:50,986 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7f75e6015732,38343,1733723270733] 2024-12-09T05:47:50,986 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348 2024-12-09T05:47:50,986 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:47:50,986 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41783 2024-12-09T05:47:50,986 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7f75e6015732,34875,1733723270682] 2024-12-09T05:47:50,986 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T05:47:50,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,987 DEBUG [RS:0;7f75e6015732:34875 {}] zookeeper.ZKUtil(111): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7f75e6015732,34875,1733723270682 2024-12-09T05:47:50,987 WARN [RS:0;7f75e6015732:34875 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T05:47:50,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:50,987 INFO [RS:0;7f75e6015732:34875 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:47:50,987 DEBUG [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,34875,1733723270682 2024-12-09T05:47:50,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T05:47:50,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T05:47:50,988 DEBUG [RS:1;7f75e6015732:41291 {}] zookeeper.ZKUtil(111): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7f75e6015732,41291,1733723270708 2024-12-09T05:47:50,988 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7f75e6015732,41291,1733723270708] 2024-12-09T05:47:50,988 WARN [RS:1;7f75e6015732:41291 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
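The ZKUtil(111) "Set watcher on existing znode=/hbase/rs/..." and RegionServerTracker(179) "RegionServer ephemeral node created" lines reflect membership tracking over ZooKeeper: each region server holds an ephemeral child of /hbase/rs, and the master watches the children list for joins and departures. A minimal sketch with the plain ZooKeeper client (helper names are illustrative):

```java
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsMembershipSketch {
    /** Region-server side: publish an ephemeral znode; it vanishes with the session. */
    public static void register(ZooKeeper zk, String serverName) throws Exception {
        zk.create("/hbase/rs/" + serverName, new byte[0],
                  ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    }

    /** Master side: list live servers and leave a watch so a NodeChildrenChanged
     *  event on /hbase/rs (as seen above) reports membership changes. */
    public static List<String> liveServers(ZooKeeper zk) throws Exception {
        return zk.getChildren("/hbase/rs", true);
    }
}
```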
2024-12-09T05:47:50,988 INFO [RS:1;7f75e6015732:41291 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:47:50,989 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,41291,1733723270708 2024-12-09T05:47:50,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T05:47:50,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:50,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:47:50,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:47:50,992 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:50,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:47:50,994 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:47:50,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:50,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:50,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T05:47:50,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740 2024-12-09T05:47:50,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740 2024-12-09T05:47:50,998 INFO [RS:2;7f75e6015732:38343 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:47:50,998 INFO [RS:1;7f75e6015732:41291 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:47:51,000 INFO [RS:0;7f75e6015732:34875 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T05:47:51,001 INFO [RS:2;7f75e6015732:38343 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T05:47:51,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T05:47:51,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T05:47:51,002 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T05:47:51,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T05:47:51,005 INFO [RS:2;7f75e6015732:38343 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T05:47:51,005 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:51,009 INFO [RS:1;7f75e6015732:41291 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-09T05:47:51,009 INFO [RS:0;7f75e6015732:34875 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-09T05:47:51,011 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-09T05:47:51,011 INFO [RS:1;7f75e6015732:41291 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T05:47:51,011 INFO [RS:0;7f75e6015732:34875 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T05:47:51,011 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T05:47:51,011 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T05:47:51,011 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-09T05:47:51,012 INFO [RS:2;7f75e6015732:38343 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-09T05:47:51,012 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T05:47:51,012 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T05:47:51,012 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1
2024-12-09T05:47:51,012 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1
2024-12-09T05:47:51,012 INFO [RS:0;7f75e6015732:34875 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-09T05:47:51,012 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1
2024-12-09T05:47:51,012 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T05:47:51,012 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,012 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60519165, jitterRate=-0.0981941670179367}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] 
executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:51,013 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733723270977Initializing all the Stores at 1733723270978 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723270979 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723270981 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723270981Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723270981Cleaning up temporary data from old regions at 1733723271001 (+20 ms)Region opened successfully at 1733723271013 (+12 ms) 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,013 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:51,013 DEBUG [RS:2;7f75e6015732:38343 {}] executor.ExecutorService(95): Starting executor service 
name=RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:51,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T05:47:51,014 DEBUG [RS:0;7f75e6015732:34875 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:51,014 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T05:47:51,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T05:47:51,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T05:47:51,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T05:47:51,014 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T05:47:51,014 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733723271013Disabling compacts and flushes for region at 1733723271013Disabling writes for close at 1733723271014 (+1 ms)Writing region close event to WAL at 1733723271014Closed at 1733723271014 2024-12-09T05:47:51,015 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,38343,1733723270733-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,015 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,34875,1733723270682-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-09T05:47:51,016 INFO [RS:1;7f75e6015732:41291 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T05:47:51,016 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,016 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,016 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,016 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7f75e6015732:0, corePoolSize=2, maxPoolSize=2 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7f75e6015732:0, corePoolSize=1, maxPoolSize=1 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:51,017 DEBUG [RS:1;7f75e6015732:41291 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0, corePoolSize=3, maxPoolSize=3 2024-12-09T05:47:51,018 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:51,018 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T05:47:51,018 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T05:47:51,020 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:47:51,022 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T05:47:51,025 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,025 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,025 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,025 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,025 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,025 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,41291,1733723270708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:47:51,031 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:47:51,031 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,38343,1733723270733-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,032 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,032 INFO [RS:2;7f75e6015732:38343 {}] regionserver.Replication(171): 7f75e6015732,38343,1733723270733 started 2024-12-09T05:47:51,032 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:47:51,032 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,34875,1733723270682-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,033 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,033 INFO [RS:0;7f75e6015732:34875 {}] regionserver.Replication(171): 7f75e6015732,34875,1733723270682 started 2024-12-09T05:47:51,044 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T05:47:51,044 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:51,044 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1482): Serving as 7f75e6015732,38343,1733723270733, RpcServer on 7f75e6015732/172.17.0.2:38343, sessionid=0x100bd850a4c0003 2024-12-09T05:47:51,044 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,41291,1733723270708-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,044 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:47:51,044 DEBUG [RS:2;7f75e6015732:38343 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7f75e6015732,38343,1733723270733 2024-12-09T05:47:51,044 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,044 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,38343,1733723270733' 2024-12-09T05:47:51,044 INFO [RS:1;7f75e6015732:41291 {}] regionserver.Replication(171): 7f75e6015732,41291,1733723270708 started 2024-12-09T05:47:51,044 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:47:51,045 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:47:51,046 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:47:51,046 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:47:51,046 DEBUG [RS:2;7f75e6015732:38343 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7f75e6015732,38343,1733723270733 2024-12-09T05:47:51,046 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,38343,1733723270733' 2024-12-09T05:47:51,046 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:47:51,046 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:47:51,047 DEBUG [RS:2;7f75e6015732:38343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:47:51,047 INFO [RS:2;7f75e6015732:38343 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:47:51,047 INFO [RS:2;7f75e6015732:38343 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:47:51,051 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:51,052 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(1482): Serving as 7f75e6015732,34875,1733723270682, RpcServer on 7f75e6015732/172.17.0.2:34875, sessionid=0x100bd850a4c0001 2024-12-09T05:47:51,052 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T05:47:51,052 DEBUG [RS:0;7f75e6015732:34875 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7f75e6015732,34875,1733723270682 2024-12-09T05:47:51,052 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,34875,1733723270682' 2024-12-09T05:47:51,052 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T05:47:51,053 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T05:47:51,053 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T05:47:51,053 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T05:47:51,053 DEBUG [RS:0;7f75e6015732:34875 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7f75e6015732,34875,1733723270682 2024-12-09T05:47:51,053 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,34875,1733723270682' 2024-12-09T05:47:51,053 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T05:47:51,054 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T05:47:51,054 DEBUG [RS:0;7f75e6015732:34875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T05:47:51,054 INFO [RS:0;7f75e6015732:34875 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T05:47:51,054 INFO [RS:0;7f75e6015732:34875 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T05:47:51,059 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:51,059 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1482): Serving as 7f75e6015732,41291,1733723270708, RpcServer on 7f75e6015732/172.17.0.2:41291, sessionid=0x100bd850a4c0002
2024-12-09T05:47:51,060 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-09T05:47:51,060 DEBUG [RS:1;7f75e6015732:41291 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7f75e6015732,41291,1733723270708
2024-12-09T05:47:51,060 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,41291,1733723270708'
2024-12-09T05:47:51,060 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-09T05:47:51,061 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-09T05:47:51,061 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-09T05:47:51,061 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-09T05:47:51,061 DEBUG [RS:1;7f75e6015732:41291 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7f75e6015732,41291,1733723270708
2024-12-09T05:47:51,061 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7f75e6015732,41291,1733723270708'
2024-12-09T05:47:51,061 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-09T05:47:51,062 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-09T05:47:51,062 DEBUG [RS:1;7f75e6015732:41291 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-09T05:47:51,062 INFO [RS:1;7f75e6015732:41291 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-09T05:47:51,062 INFO [RS:1;7f75e6015732:41291 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-09T05:47:51,153 INFO [RS:2;7f75e6015732:38343 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C38343%2C1733723270733, suffix=, logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,38343,1733723270733, archiveDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs, maxLogs=32
2024-12-09T05:47:51,156 INFO [RS:2;7f75e6015732:38343 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7f75e6015732%2C38343%2C1733723270733.1733723271155
2024-12-09T05:47:51,158 INFO [RS:0;7f75e6015732:34875 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C34875%2C1733723270682, suffix=, logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,34875,1733723270682, archiveDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs, maxLogs=32
2024-12-09T05:47:51,159 INFO [RS:0;7f75e6015732:34875 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7f75e6015732%2C34875%2C1733723270682.1733723271159
2024-12-09T05:47:51,164 INFO [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C41291%2C1733723270708, suffix=, logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,41291,1733723270708, archiveDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs, maxLogs=32
2024-12-09T05:47:51,165 INFO [RS:1;7f75e6015732:41291 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7f75e6015732%2C41291%2C1733723270708.1733723271165
2024-12-09T05:47:51,166 INFO [RS:2;7f75e6015732:38343 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,38343,1733723270733/7f75e6015732%2C38343%2C1733723270733.1733723271155
2024-12-09T05:47:51,168 DEBUG [RS:2;7f75e6015732:38343 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41867:41867),(127.0.0.1/127.0.0.1:40309:40309),(127.0.0.1/127.0.0.1:41931:41931)]
2024-12-09T05:47:51,172 INFO [RS:0;7f75e6015732:34875 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,34875,1733723270682/7f75e6015732%2C34875%2C1733723270682.1733723271159
2024-12-09T05:47:51,172 WARN [7f75e6015732:45055 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-09T05:47:51,174 DEBUG [RS:0;7f75e6015732:34875 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40309:40309),(127.0.0.1/127.0.0.1:41867:41867),(127.0.0.1/127.0.0.1:41931:41931)] 2024-12-09T05:47:51,176 INFO [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,41291,1733723270708/7f75e6015732%2C41291%2C1733723270708.1733723271165 2024-12-09T05:47:51,177 DEBUG [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40309:40309),(127.0.0.1/127.0.0.1:41931:41931),(127.0.0.1/127.0.0.1:41867:41867)] 2024-12-09T05:47:51,423 DEBUG [7f75e6015732:45055 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T05:47:51,423 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(204): Hosts are {7f75e6015732=0} racks are {/default-rack=0} 2024-12-09T05:47:51,429 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T05:47:51,429 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T05:47:51,429 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T05:47:51,429 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T05:47:51,430 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T05:47:51,430 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T05:47:51,430 INFO [7f75e6015732:45055 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T05:47:51,430 INFO [7f75e6015732:45055 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T05:47:51,430 INFO [7f75e6015732:45055 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T05:47:51,430 DEBUG [7f75e6015732:45055 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T05:47:51,431 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7f75e6015732,41291,1733723270708 2024-12-09T05:47:51,434 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7f75e6015732,41291,1733723270708, state=OPENING 2024-12-09T05:47:51,436 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T05:47:51,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:51,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:51,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:51,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, 
quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:51,439 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T05:47:51,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7f75e6015732,41291,1733723270708}] 2024-12-09T05:47:51,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,440 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,594 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:47:51,596 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49741, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:47:51,600 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T05:47:51,600 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T05:47:51,602 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7f75e6015732%2C41291%2C1733723270708.meta, suffix=.meta, logDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,41291,1733723270708, archiveDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs, maxLogs=32 2024-12-09T05:47:51,603 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7f75e6015732%2C41291%2C1733723270708.meta.1733723271603.meta 2024-12-09T05:47:51,614 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/WALs/7f75e6015732,41291,1733723270708/7f75e6015732%2C41291%2C1733723270708.meta.1733723271603.meta 2024-12-09T05:47:51,616 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40309:40309),(127.0.0.1/127.0.0.1:41867:41867),(127.0.0.1/127.0.0.1:41931:41931)] 2024-12-09T05:47:51,616 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 
1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:47:51,617 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T05:47:51,617 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T05:47:51,617 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T05:47:51,617 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T05:47:51,617 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:51,617 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T05:47:51,617 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T05:47:51,619 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T05:47:51,621 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T05:47:51,621 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:51,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:51,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-12-09T05:47:51,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T05:47:51,623 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:51,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:51,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T05:47:51,624 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T05:47:51,624 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:51,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:51,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T05:47:51,626 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T05:47:51,626 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:51,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T05:47:51,627 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T05:47:51,628 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740 2024-12-09T05:47:51,630 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740 2024-12-09T05:47:51,632 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T05:47:51,632 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T05:47:51,633 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T05:47:51,636 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T05:47:51,637 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67871846, jitterRate=0.011369317770004272}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T05:47:51,637 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T05:47:51,639 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733723271618Writing region info on filesystem at 1733723271618Initializing all the Stores at 1733723271619 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723271619Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723271619Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723271619Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733723271619Cleaning up temporary data from old regions at 1733723271632 (+13 ms)Running coprocessor post-open hooks at 1733723271638 (+6 ms)Region opened successfully at 1733723271639 (+1 ms) 2024-12-09T05:47:51,641 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733723271594 2024-12-09T05:47:51,645 DEBUG [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T05:47:51,645 INFO [RS_OPEN_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T05:47:51,646 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=7f75e6015732,41291,1733723270708 2024-12-09T05:47:51,648 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7f75e6015732,41291,1733723270708, state=OPEN 2024-12-09T05:47:51,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:51,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:51,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:51,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T05:47:51,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,650 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7f75e6015732,41291,1733723270708 2024-12-09T05:47:51,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T05:47:51,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T05:47:51,655 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7f75e6015732,41291,1733723270708 in 211 msec 2024-12-09T05:47:51,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T05:47:51,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 636 msec 2024-12-09T05:47:51,660 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T05:47:51,660 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T05:47:51,662 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T05:47:51,662 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=7f75e6015732,41291,1733723270708, seqNum=-1] 2024-12-09T05:47:51,662 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:47:51,664 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54181, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:47:51,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 750 msec 2024-12-09T05:47:51,674 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733723271673, completionTime=-1 2024-12-09T05:47:51,674 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T05:47:51,674 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T05:47:51,676 INFO [master/7f75e6015732:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T05:47:51,676 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733723331676 2024-12-09T05:47:51,676 INFO [master/7f75e6015732:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733723391676 2024-12-09T05:47:51,677 INFO [master/7f75e6015732:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T05:47:51,677 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,45055,1733723270640-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,677 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,45055,1733723270640-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,677 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,45055,1733723270640-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,677 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7f75e6015732:45055, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,677 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T05:47:51,678 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-09T05:47:51,680 DEBUG [master/7f75e6015732:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.920sec 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,45055,1733723270640-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T05:47:51,684 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,45055,1733723270640-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T05:47:51,687 DEBUG [master/7f75e6015732:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T05:47:51,687 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T05:47:51,687 INFO [master/7f75e6015732:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7f75e6015732,45055,1733723270640-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
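At this point the master reports initialization complete and the mini cluster is about to be declared up. As a rough illustration only (the actual TestHBaseWalOnEC source is not part of this log, and the erasure-coding WAL setup that gives the test its name is omitted), a test typically brings up such a three-region-server cluster with HBaseTestingUtil along these lines, assuming the startMiniCluster(int) overload is available:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Three region servers, matching "Finished waiting on RegionServer count=3" above.
        util.startMiniCluster(3);
        try {
          // test body: create the table, put a cell, flush (see the sketches further below)
        } finally {
          // same call that appears in the teardown stack traces later in this log
          util.shutdownMiniCluster();
        }
      }
    }
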
2024-12-09T05:47:51,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb57756, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:47:51,749 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7f75e6015732,45055,-1 for getting cluster id 2024-12-09T05:47:51,749 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T05:47:51,751 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5b859e73-7e41-4fc4-9086-9026d6f1a6b3' 2024-12-09T05:47:51,751 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T05:47:51,751 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5b859e73-7e41-4fc4-9086-9026d6f1a6b3" 2024-12-09T05:47:51,752 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537879fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:47:51,752 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7f75e6015732,45055,-1] 2024-12-09T05:47:51,752 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T05:47:51,753 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:51,754 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35268, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T05:47:51,755 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7e7588, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T05:47:51,755 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T05:47:51,757 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7f75e6015732,41291,1733723270708, seqNum=-1] 2024-12-09T05:47:51,757 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:47:51,759 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:47:51,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7f75e6015732,45055,1733723270640 2024-12-09T05:47:51,762 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T05:47:51,763 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 7f75e6015732,45055,1733723270640 2024-12-09T05:47:51,763 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3da467e6 2024-12-09T05:47:51,763 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T05:47:51,765 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35272, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T05:47:51,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T05:47:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T05:47:51,769 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T05:47:51,770 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:51,770 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T05:47:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:51,772 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T05:47:51,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741837_1013 (size=392) 2024-12-09T05:47:51,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741837_1013 (size=392) 2024-12-09T05:47:51,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741837_1013 (size=392) 2024-12-09T05:47:51,784 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9d9a6328256ec26ad997d1aea8169bac, NAME => 'TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348 2024-12-09T05:47:51,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741838_1014 (size=51) 2024-12-09T05:47:51,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741838_1014 (size=51) 2024-12-09T05:47:51,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741838_1014 (size=51) 2024-12-09T05:47:51,796 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:51,796 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 9d9a6328256ec26ad997d1aea8169bac, disabling compactions & flushes 2024-12-09T05:47:51,796 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:51,796 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:51,797 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. after waiting 0 ms 2024-12-09T05:47:51,797 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:51,797 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:51,797 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9d9a6328256ec26ad997d1aea8169bac: Waiting for close lock at 1733723271796Disabling compacts and flushes for region at 1733723271796Disabling writes for close at 1733723271797 (+1 ms)Writing region close event to WAL at 1733723271797Closed at 1733723271797 2024-12-09T05:47:51,799 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T05:47:51,799 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733723271799"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733723271799"}]},"ts":"1733723271799"} 2024-12-09T05:47:51,802 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
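The create request logged above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single family 'cf') corresponds to a plain Admin.createTable call. A minimal sketch using the public client API, assuming a Connection to the mini cluster (the remaining attributes in the descriptor simply take their defaults):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class CreateTableSketch {
      static void createTestTable(Connection conn) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
            .setRegionReplication(1)                                 // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // family 'cf', all defaults
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(td); // drives the CreateTableProcedure (pid=4) seen in the log
        }
      }
    }
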
2024-12-09T05:47:51,804 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T05:47:51,804 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733723271804"}]},"ts":"1733723271804"} 2024-12-09T05:47:51,807 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T05:47:51,808 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {7f75e6015732=0} racks are {/default-rack=0} 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T05:47:51,809 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T05:47:51,809 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T05:47:51,809 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T05:47:51,809 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T05:47:51,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9d9a6328256ec26ad997d1aea8169bac, ASSIGN}] 2024-12-09T05:47:51,812 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9d9a6328256ec26ad997d1aea8169bac, ASSIGN 2024-12-09T05:47:51,813 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9d9a6328256ec26ad997d1aea8169bac, ASSIGN; state=OFFLINE, location=7f75e6015732,38343,1733723270733; forceNewPlan=false, retain=false 2024-12-09T05:47:51,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:51,964 INFO [7f75e6015732:45055 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T05:47:51,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9d9a6328256ec26ad997d1aea8169bac, regionState=OPENING, regionLocation=7f75e6015732,38343,1733723270733 2024-12-09T05:47:51,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9d9a6328256ec26ad997d1aea8169bac, ASSIGN because future has completed 2024-12-09T05:47:51,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9d9a6328256ec26ad997d1aea8169bac, server=7f75e6015732,38343,1733723270733}] 2024-12-09T05:47:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:52,125 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T05:47:52,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50099, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T05:47:52,132 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:52,132 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9d9a6328256ec26ad997d1aea8169bac, NAME => 'TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.', STARTKEY => '', ENDKEY => ''} 2024-12-09T05:47:52,133 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,133 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T05:47:52,133 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,134 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,136 INFO [StoreOpener-9d9a6328256ec26ad997d1aea8169bac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,138 INFO [StoreOpener-9d9a6328256ec26ad997d1aea8169bac-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9d9a6328256ec26ad997d1aea8169bac columnFamilyName cf 2024-12-09T05:47:52,138 DEBUG [StoreOpener-9d9a6328256ec26ad997d1aea8169bac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T05:47:52,139 INFO [StoreOpener-9d9a6328256ec26ad997d1aea8169bac-1 {}] regionserver.HStore(327): Store=9d9a6328256ec26ad997d1aea8169bac/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T05:47:52,139 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,140 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,141 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,141 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,141 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,144 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,146 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T05:47:52,147 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9d9a6328256ec26ad997d1aea8169bac; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60393324, jitterRate=-0.10006934404373169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T05:47:52,147 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9d9a6328256ec26ad997d1aea8169bac 2024-12-09T05:47:52,148 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9d9a6328256ec26ad997d1aea8169bac: Running coprocessor pre-open hook at 1733723272134Writing region info on filesystem at 1733723272134Initializing all the Stores at 1733723272136 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733723272136Cleaning up temporary data from old regions at 1733723272141 (+5 ms)Running coprocessor post-open hooks at 1733723272147 (+6 ms)Region opened successfully at 1733723272148 (+1 ms) 2024-12-09T05:47:52,149 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac., pid=6, masterSystemTime=1733723272125 2024-12-09T05:47:52,153 DEBUG [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:52,153 INFO [RS_OPEN_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:52,154 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9d9a6328256ec26ad997d1aea8169bac, regionState=OPEN, openSeqNum=2, regionLocation=7f75e6015732,38343,1733723270733 2024-12-09T05:47:52,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9d9a6328256ec26ad997d1aea8169bac, server=7f75e6015732,38343,1733723270733 because future has completed 2024-12-09T05:47:52,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T05:47:52,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9d9a6328256ec26ad997d1aea8169bac, server=7f75e6015732,38343,1733723270733 in 190 msec 2024-12-09T05:47:52,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T05:47:52,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=9d9a6328256ec26ad997d1aea8169bac, ASSIGN in 355 msec 2024-12-09T05:47:52,169 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T05:47:52,169 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733723272169"}]},"ts":"1733723272169"} 2024-12-09T05:47:52,173 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T05:47:52,174 INFO [PEWorker-2 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T05:47:52,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 409 msec 2024-12-09T05:47:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T05:47:52,398 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T05:47:52,398 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T05:47:52,398 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:47:52,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T05:47:52,405 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T05:47:52,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T05:47:52,410 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac., hostname=7f75e6015732,38343,1733723270733, seqNum=2] 2024-12-09T05:47:52,410 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T05:47:52,412 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T05:47:52,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T05:47:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T05:47:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T05:47:52,419 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T05:47:52,421 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T05:47:52,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T05:47:52,525 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T05:47:52,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38343 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T05:47:52,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:52,577 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 9d9a6328256ec26ad997d1aea8169bac 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T05:47:52,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/.tmp/cf/ced50a8c4b2246f4be392c1a9f1a5097 is 36, key is row/cf:cq/1733723272413/Put/seqid=0 2024-12-09T05:47:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741839_1015 (size=4787) 2024-12-09T05:47:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741839_1015 (size=4787) 2024-12-09T05:47:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741839_1015 (size=4787) 2024-12-09T05:47:52,607 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/.tmp/cf/ced50a8c4b2246f4be392c1a9f1a5097 2024-12-09T05:47:52,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/.tmp/cf/ced50a8c4b2246f4be392c1a9f1a5097 as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/cf/ced50a8c4b2246f4be392c1a9f1a5097 2024-12-09T05:47:52,626 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/cf/ced50a8c4b2246f4be392c1a9f1a5097, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T05:47:52,628 INFO [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 9d9a6328256ec26ad997d1aea8169bac in 51ms, sequenceid=5, compaction requested=false 2024-12-09T05:47:52,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 9d9a6328256ec26ad997d1aea8169bac: 2024-12-09T05:47:52,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. 2024-12-09T05:47:52,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7f75e6015732:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T05:47:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T05:47:52,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T05:47:52,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-12-09T05:47:52,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 220 msec 2024-12-09T05:47:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45055 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T05:47:52,736 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T05:47:52,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T05:47:52,740 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
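The flush above wrote a single 32-byte cell at row 'row', family 'cf', qualifier 'cq' (see the HFileWriterImpl record). In client terms that is one Put followed by an Admin flush; a minimal sketch, assuming a Connection to the cluster and a placeholder value (the actual value bytes are not visible in this log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAndFlushSketch {
      static void putAndFlush(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          // One cell at row/cf:cq, matching the key logged by the flush.
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(tn); // triggers the FlushTableProcedure (pid=7) / FlushRegionProcedure (pid=8)
        }
      }
    }
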
2024-12-09T05:47:52,740 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:52,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-09T05:47:52,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:52,740 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T05:47:52,741 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T05:47:52,741 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=118606026, stopped=false 2024-12-09T05:47:52,741 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7f75e6015732,45055,1733723270640 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:52,742 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T05:47:52,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T05:47:52,742 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T05:47:52,743 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:52,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:52,743 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7f75e6015732,34875,1733723270682' ***** 2024-12-09T05:47:52,743 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T05:47:52,743 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7f75e6015732,41291,1733723270708' ***** 2024-12-09T05:47:52,743 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T05:47:52,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:52,743 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:52,743 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7f75e6015732,38343,1733723270733' ***** 2024-12-09T05:47:52,743 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:47:52,743 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T05:47:52,744 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T05:47:52,744 INFO [RS:0;7f75e6015732:34875 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:47:52,744 INFO [RS:0;7f75e6015732:34875 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:47:52,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:52,744 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(959): stopping server 7f75e6015732,34875,1733723270682 2024-12-09T05:47:52,744 INFO [RS:0;7f75e6015732:34875 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T05:47:52,744 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(959): stopping server 7f75e6015732,41291,1733723270708 2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T05:47:52,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7f75e6015732:41291. 2024-12-09T05:47:52,744 DEBUG [RS:1;7f75e6015732:41291 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T05:47:52,744 DEBUG [RS:1;7f75e6015732:41291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T05:47:52,744 INFO [RS:1;7f75e6015732:41291 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T05:47:52,744 INFO [RS:0;7f75e6015732:34875 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7f75e6015732:34875. 2024-12-09T05:47:52,744 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T05:47:52,745 INFO [RS:1;7f75e6015732:41291 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T05:47:52,745 INFO [RS:1;7f75e6015732:41291 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T05:47:52,745 DEBUG [RS:0;7f75e6015732:34875 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T05:47:52,745 INFO [RS:2;7f75e6015732:38343 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T05:47:52,745 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T05:47:52,745 DEBUG [RS:0;7f75e6015732:34875 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T05:47:52,745 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T05:47:52,745 INFO [RS:2;7f75e6015732:38343 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T05:47:52,745 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(3091): Received CLOSE for 9d9a6328256ec26ad997d1aea8169bac
2024-12-09T05:47:52,745 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(976): stopping server 7f75e6015732,34875,1733723270682; all regions closed.
2024-12-09T05:47:52,745 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-09T05:47:52,745 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(959): stopping server 7f75e6015732,38343,1733723270733
2024-12-09T05:47:52,745 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-09T05:47:52,745 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T05:47:52,745 INFO [RS:2;7f75e6015732:38343 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T05:47:52,745 DEBUG [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T05:47:52,745 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T05:47:52,745 INFO [RS:2;7f75e6015732:38343 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;7f75e6015732:38343.
2024-12-09T05:47:52,745 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T05:47:52,746 DEBUG [RS:2;7f75e6015732:38343 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T05:47:52,746 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 1 ms
2024-12-09T05:47:52,746 DEBUG [RS:2;7f75e6015732:38343 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T05:47:52,746 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T05:47:52,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,746 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-09T05:47:52,746 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1325): Online Regions={9d9a6328256ec26ad997d1aea8169bac=TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.}
2024-12-09T05:47:52,746 DEBUG [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1351): Waiting on 9d9a6328256ec26ad997d1aea8169bac
2024-12-09T05:47:52,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,746 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9d9a6328256ec26ad997d1aea8169bac, disabling compactions & flushes
2024-12-09T05:47:52,746 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB
2024-12-09T05:47:52,746 INFO [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.
2024-12-09T05:47:52,746 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,746 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.
2024-12-09T05:47:52,746 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,746 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac. after waiting 0 ms
2024-12-09T05:47:52,746 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.
2024-12-09T05:47:52,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741834_1010 (size=93)
2024-12-09T05:47:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741834_1010 (size=93)
2024-12-09T05:47:52,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741834_1010 (size=93)
2024-12-09T05:47:52,754 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/default/TestHBaseWalOnEC/9d9a6328256ec26ad997d1aea8169bac/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-09T05:47:52,755 INFO [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.
2024-12-09T05:47:52,755 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9d9a6328256ec26ad997d1aea8169bac: Waiting for close lock at 1733723272746Running coprocessor pre-close hooks at 1733723272746Disabling compacts and flushes for region at 1733723272746Disabling writes for close at 1733723272746Writing region close event to WAL at 1733723272747 (+1 ms)Running coprocessor post-close hooks at 1733723272754 (+7 ms)Closed at 1733723272755 (+1 ms)
2024-12-09T05:47:52,755 DEBUG [RS_CLOSE_REGION-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac.
2024-12-09T05:47:52,763 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/info/f59e23b9f4d94343ad599886429e5f0a is 153, key is TestHBaseWalOnEC,,1733723271765.9d9a6328256ec26ad997d1aea8169bac./info:regioninfo/1733723272154/Put/seqid=0
2024-12-09T05:47:52,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741840_1016 (size=6637)
2024-12-09T05:47:52,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741840_1016 (size=6637)
2024-12-09T05:47:52,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741840_1016 (size=6637)
2024-12-09T05:47:52,770 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/info/f59e23b9f4d94343ad599886429e5f0a
2024-12-09T05:47:52,791 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/ns/5c3c3589d2fc4a84b65851acdae8370f is 43, key is default/ns:d/1733723271664/Put/seqid=0
2024-12-09T05:47:52,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741841_1017 (size=5153)
2024-12-09T05:47:52,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741841_1017 (size=5153)
2024-12-09T05:47:52,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741841_1017 (size=5153)
2024-12-09T05:47:52,799 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/ns/5c3c3589d2fc4a84b65851acdae8370f
2024-12-09T05:47:52,817 INFO [regionserver/7f75e6015732:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T05:47:52,817 INFO [regionserver/7f75e6015732:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T05:47:52,823 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/table/fa858e8929ca43429156b4d4e13a75f9 is 52, key is TestHBaseWalOnEC/table:state/1733723272169/Put/seqid=0
2024-12-09T05:47:52,827 INFO [regionserver/7f75e6015732:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T05:47:52,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741842_1018 (size=5249)
2024-12-09T05:47:52,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741842_1018 (size=5249)
2024-12-09T05:47:52,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741842_1018 (size=5249)
2024-12-09T05:47:52,831 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/table/fa858e8929ca43429156b4d4e13a75f9
2024-12-09T05:47:52,839 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/info/f59e23b9f4d94343ad599886429e5f0a as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/info/f59e23b9f4d94343ad599886429e5f0a
2024-12-09T05:47:52,846 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/info/f59e23b9f4d94343ad599886429e5f0a, entries=10, sequenceid=11, filesize=6.5 K
2024-12-09T05:47:52,848 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/ns/5c3c3589d2fc4a84b65851acdae8370f as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/ns/5c3c3589d2fc4a84b65851acdae8370f
2024-12-09T05:47:52,855 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/ns/5c3c3589d2fc4a84b65851acdae8370f, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T05:47:52,856 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/.tmp/table/fa858e8929ca43429156b4d4e13a75f9 as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/table/fa858e8929ca43429156b4d4e13a75f9
2024-12-09T05:47:52,864 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/table/fa858e8929ca43429156b4d4e13a75f9, entries=2, sequenceid=11, filesize=5.1 K
2024-12-09T05:47:52,865 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 119ms, sequenceid=11, compaction requested=false
2024-12-09T05:47:52,870 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T05:47:52,871 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T05:47:52,871 INFO [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T05:47:52,871 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733723272745Running coprocessor pre-close hooks at 1733723272745Disabling compacts and flushes for region at 1733723272745Disabling writes for close at 1733723272746 (+1 ms)Obtaining lock to block concurrent updates at 1733723272746Preparing flush snapshotting stores in 1588230740 at 1733723272746Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733723272747 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733723272748 (+1 ms)Flushing 1588230740/info: creating writer at 1733723272748Flushing 1588230740/info: appending metadata at 1733723272763 (+15 ms)Flushing 1588230740/info: closing flushed file at 1733723272763Flushing 1588230740/ns: creating writer at 1733723272777 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733723272791 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733723272791Flushing 1588230740/table: creating writer at 1733723272807 (+16 ms)Flushing 1588230740/table: appending metadata at 1733723272823 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733723272823Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25f86d8d: reopening flushed file at 1733723272837 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7aaf4897: reopening flushed file at 1733723272846 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f4552c8: reopening flushed file at 1733723272855 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 119ms, sequenceid=11, compaction requested=false at 1733723272865 (+10 ms)Writing region close event to WAL at 1733723272866 (+1 ms)Running coprocessor post-close hooks at 1733723272871 (+5 ms)Closed at 1733723272871
2024-12-09T05:47:52,871 DEBUG [RS_CLOSE_META-regionserver/7f75e6015732:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T05:47:52,946 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(976): stopping server 7f75e6015732,41291,1733723270708; all regions closed.
2024-12-09T05:47:52,946 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(976): stopping server 7f75e6015732,38343,1733723270733; all regions closed.
2024-12-09T05:47:52,946 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,946 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741836_1012 (size=2751)
2024-12-09T05:47:52,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741833_1009 (size=1298)
2024-12-09T05:47:52,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741836_1012 (size=2751)
2024-12-09T05:47:52,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741833_1009 (size=1298)
2024-12-09T05:47:52,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741836_1012 (size=2751)
2024-12-09T05:47:52,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741833_1009 (size=1298)
2024-12-09T05:47:52,954 DEBUG [RS:2;7f75e6015732:38343 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7f75e6015732%2C38343%2C1733723270733:(num 1733723271155)
2024-12-09T05:47:52,954 DEBUG [RS:2;7f75e6015732:38343 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] hbase.ChoreService(370): Chore service for: regionserver/7f75e6015732:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T05:47:52,954 INFO [RS:2;7f75e6015732:38343 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T05:47:52,955 INFO [RS:2;7f75e6015732:38343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38343
2024-12-09T05:47:52,955 INFO [regionserver/7f75e6015732:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T05:47:52,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T05:47:52,956 DEBUG [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs
2024-12-09T05:47:52,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7f75e6015732,38343,1733723270733
2024-12-09T05:47:52,956 INFO [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7f75e6015732%2C41291%2C1733723270708.meta:.meta(num 1733723271603)
2024-12-09T05:47:52,956 INFO [RS:2;7f75e6015732:38343 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T05:47:52,957 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,957 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7f75e6015732,38343,1733723270733]
2024-12-09T05:47:52,957 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,957 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,957 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:52,958 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7f75e6015732,38343,1733723270733 already deleted, retry=false
2024-12-09T05:47:52,958 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7f75e6015732,38343,1733723270733 expired; onlineServers=2
2024-12-09T05:47:52,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741835_1011 (size=93)
2024-12-09T05:47:52,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741835_1011 (size=93)
2024-12-09T05:47:52,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741835_1011 (size=93)
2024-12-09T05:47:52,963 DEBUG [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs
2024-12-09T05:47:52,963 INFO [RS:1;7f75e6015732:41291 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7f75e6015732%2C41291%2C1733723270708:(num 1733723271165)
2024-12-09T05:47:52,963 DEBUG [RS:1;7f75e6015732:41291 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T05:47:52,963 INFO [RS:1;7f75e6015732:41291 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T05:47:52,964 INFO [RS:1;7f75e6015732:41291 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T05:47:52,964 INFO [RS:1;7f75e6015732:41291 {}] hbase.ChoreService(370): Chore service for: regionserver/7f75e6015732:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T05:47:52,964 INFO [RS:1;7f75e6015732:41291 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T05:47:52,964 INFO [regionserver/7f75e6015732:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T05:47:52,964 INFO [RS:1;7f75e6015732:41291 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41291
2024-12-09T05:47:52,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7f75e6015732,41291,1733723270708
2024-12-09T05:47:52,966 INFO [RS:1;7f75e6015732:41291 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T05:47:52,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T05:47:52,967 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7f75e6015732,41291,1733723270708]
2024-12-09T05:47:52,968 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7f75e6015732,41291,1733723270708 already deleted, retry=false
2024-12-09T05:47:52,968 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7f75e6015732,41291,1733723270708 expired; onlineServers=1
2024-12-09T05:47:53,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,058 INFO [RS:2;7f75e6015732:38343 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T05:47:53,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38343-0x100bd850a4c0003, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,058 INFO [RS:2;7f75e6015732:38343 {}] regionserver.HRegionServer(1031): Exiting; stopping=7f75e6015732,38343,1733723270733; zookeeper connection closed.
2024-12-09T05:47:53,058 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@ff68d3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@ff68d3
2024-12-09T05:47:53,063 INFO [regionserver/7f75e6015732:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T05:47:53,063 INFO [regionserver/7f75e6015732:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T05:47:53,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,067 INFO [RS:1;7f75e6015732:41291 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T05:47:53,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41291-0x100bd850a4c0002, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,067 INFO [RS:1;7f75e6015732:41291 {}] regionserver.HRegionServer(1031): Exiting; stopping=7f75e6015732,41291,1733723270708; zookeeper connection closed.
2024-12-09T05:47:53,067 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6181ec26 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6181ec26
2024-12-09T05:47:53,156 DEBUG [RS:0;7f75e6015732:34875 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/oldWALs
2024-12-09T05:47:53,156 INFO [RS:0;7f75e6015732:34875 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7f75e6015732%2C34875%2C1733723270682:(num 1733723271159)
2024-12-09T05:47:53,156 DEBUG [RS:0;7f75e6015732:34875 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T05:47:53,156 INFO [RS:0;7f75e6015732:34875 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T05:47:53,156 INFO [RS:0;7f75e6015732:34875 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T05:47:53,156 INFO [RS:0;7f75e6015732:34875 {}] hbase.ChoreService(370): Chore service for: regionserver/7f75e6015732:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T05:47:53,157 INFO [RS:0;7f75e6015732:34875 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T05:47:53,157 INFO [regionserver/7f75e6015732:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T05:47:53,157 INFO [RS:0;7f75e6015732:34875 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T05:47:53,157 INFO [RS:0;7f75e6015732:34875 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T05:47:53,157 INFO [RS:0;7f75e6015732:34875 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T05:47:53,158 INFO [RS:0;7f75e6015732:34875 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34875
2024-12-09T05:47:53,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7f75e6015732,34875,1733723270682
2024-12-09T05:47:53,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T05:47:53,160 INFO [RS:0;7f75e6015732:34875 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T05:47:53,162 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7f75e6015732,34875,1733723270682]
2024-12-09T05:47:53,162 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7f75e6015732,34875,1733723270682 already deleted, retry=false
2024-12-09T05:47:53,162 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7f75e6015732,34875,1733723270682 expired; onlineServers=0
2024-12-09T05:47:53,163 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7f75e6015732,45055,1733723270640' *****
2024-12-09T05:47:53,163 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T05:47:53,163 INFO [M:0;7f75e6015732:45055 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T05:47:53,163 INFO [M:0;7f75e6015732:45055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T05:47:53,163 DEBUG [M:0;7f75e6015732:45055 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T05:47:53,163 DEBUG [M:0;7f75e6015732:45055 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T05:47:53,163 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T05:47:53,163 DEBUG [master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.large.0-1733723270932 {}] cleaner.HFileCleaner(306): Exit Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.large.0-1733723270932,5,FailOnTimeoutGroup]
2024-12-09T05:47:53,163 DEBUG [master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.small.0-1733723270933 {}] cleaner.HFileCleaner(306): Exit Thread[master/7f75e6015732:0:becomeActiveMaster-HFileCleaner.small.0-1733723270933,5,FailOnTimeoutGroup]
2024-12-09T05:47:53,163 INFO [M:0;7f75e6015732:45055 {}] hbase.ChoreService(370): Chore service for: master/7f75e6015732:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T05:47:53,163 INFO [M:0;7f75e6015732:45055 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T05:47:53,163 DEBUG [M:0;7f75e6015732:45055 {}] master.HMaster(1795): Stopping service threads
2024-12-09T05:47:53,163 INFO [M:0;7f75e6015732:45055 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T05:47:53,163 INFO [M:0;7f75e6015732:45055 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T05:47:53,164 INFO [M:0;7f75e6015732:45055 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T05:47:53,164 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T05:47:53,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T05:47:53,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T05:47:53,164 DEBUG [M:0;7f75e6015732:45055 {}] zookeeper.ZKUtil(347): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T05:47:53,164 WARN [M:0;7f75e6015732:45055 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T05:47:53,165 INFO [M:0;7f75e6015732:45055 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/.lastflushedseqids
2024-12-09T05:47:53,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741843_1019 (size=127)
2024-12-09T05:47:53,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741843_1019 (size=127)
2024-12-09T05:47:53,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741843_1019 (size=127)
2024-12-09T05:47:53,176 INFO [M:0;7f75e6015732:45055 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T05:47:53,176 INFO [M:0;7f75e6015732:45055 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T05:47:53,176 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T05:47:53,176 INFO [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:53,176 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:53,176 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T05:47:53,176 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:53,176 INFO [M:0;7f75e6015732:45055 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB
2024-12-09T05:47:53,191 DEBUG [M:0;7f75e6015732:45055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f94804768d04deb8dd89b25ae26ea1b is 82, key is hbase:meta,,1/info:regioninfo/1733723271646/Put/seqid=0
2024-12-09T05:47:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741844_1020 (size=5672)
2024-12-09T05:47:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741844_1020 (size=5672)
2024-12-09T05:47:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741844_1020 (size=5672)
2024-12-09T05:47:53,199 INFO [M:0;7f75e6015732:45055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f94804768d04deb8dd89b25ae26ea1b
2024-12-09T05:47:53,220 DEBUG [M:0;7f75e6015732:45055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/908d0bbcfb8d40a3aad31a5ccd767353 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733723272176/Put/seqid=0
2024-12-09T05:47:53,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741845_1021 (size=6440)
2024-12-09T05:47:53,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741845_1021 (size=6440)
2024-12-09T05:47:53,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741845_1021 (size=6440)
2024-12-09T05:47:53,227 INFO [M:0;7f75e6015732:45055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/908d0bbcfb8d40a3aad31a5ccd767353
2024-12-09T05:47:53,247 DEBUG [M:0;7f75e6015732:45055 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7bbaf74d87ec48df8f14327b65d8299b is 69, key is 7f75e6015732,34875,1733723270682/rs:state/1733723270981/Put/seqid=0
2024-12-09T05:47:53,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741846_1022 (size=5294)
2024-12-09T05:47:53,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741846_1022 (size=5294)
2024-12-09T05:47:53,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741846_1022 (size=5294)
2024-12-09T05:47:53,254 INFO [M:0;7f75e6015732:45055 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7bbaf74d87ec48df8f14327b65d8299b
2024-12-09T05:47:53,261 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7f94804768d04deb8dd89b25ae26ea1b as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7f94804768d04deb8dd89b25ae26ea1b
2024-12-09T05:47:53,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,262 INFO [RS:0;7f75e6015732:34875 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T05:47:53,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34875-0x100bd850a4c0001, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,262 INFO [RS:0;7f75e6015732:34875 {}] regionserver.HRegionServer(1031): Exiting; stopping=7f75e6015732,34875,1733723270682; zookeeper connection closed.
2024-12-09T05:47:53,262 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56638a29 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56638a29
2024-12-09T05:47:53,262 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-09T05:47:53,268 INFO [M:0;7f75e6015732:45055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7f94804768d04deb8dd89b25ae26ea1b, entries=8, sequenceid=72, filesize=5.5 K
2024-12-09T05:47:53,269 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/908d0bbcfb8d40a3aad31a5ccd767353 as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/908d0bbcfb8d40a3aad31a5ccd767353
2024-12-09T05:47:53,276 INFO [M:0;7f75e6015732:45055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/908d0bbcfb8d40a3aad31a5ccd767353, entries=8, sequenceid=72, filesize=6.3 K
2024-12-09T05:47:53,277 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7bbaf74d87ec48df8f14327b65d8299b as hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7bbaf74d87ec48df8f14327b65d8299b
2024-12-09T05:47:53,282 INFO [M:0;7f75e6015732:45055 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41783/user/jenkins/test-data/8dbba659-dc80-fdca-28f0-4b235d8de348/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7bbaf74d87ec48df8f14327b65d8299b, entries=3, sequenceid=72, filesize=5.2 K
2024-12-09T05:47:53,284 INFO [M:0;7f75e6015732:45055 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=72, compaction requested=false
2024-12-09T05:47:53,285 INFO [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T05:47:53,285 DEBUG [M:0;7f75e6015732:45055 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733723273176Disabling compacts and flushes for region at 1733723273176Disabling writes for close at 1733723273176Obtaining lock to block concurrent updates at 1733723273176Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733723273176Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733723273177 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733723273177Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733723273178 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733723273191 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733723273191Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733723273205 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733723273219 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733723273219Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733723273233 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733723273247 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733723273247Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a369708: reopening flushed file at 1733723273260 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69137fa0: reopening flushed file at 1733723273268 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78cfcfc2: reopening flushed file at 1733723273276 (+8 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=72, compaction requested=false at 1733723273284 (+8 ms)Writing region close event to WAL at 1733723273285 (+1 ms)Closed at 1733723273285
2024-12-09T05:47:53,285 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:53,285 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:53,286 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:53,286 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:53,286 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T05:47:53,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36435 is added to blk_1073741830_1006 (size=32683)
2024-12-09T05:47:53,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38311 is added to blk_1073741830_1006 (size=32683)
2024-12-09T05:47:53,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38537 is added to blk_1073741830_1006 (size=32683)
2024-12-09T05:47:53,290 INFO [M:0;7f75e6015732:45055 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T05:47:53,290 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T05:47:53,290 INFO [M:0;7f75e6015732:45055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45055
2024-12-09T05:47:53,290 INFO [M:0;7f75e6015732:45055 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T05:47:53,392 INFO [M:0;7f75e6015732:45055 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T05:47:53,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45055-0x100bd850a4c0000, quorum=127.0.0.1:61000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T05:47:53,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61cce6fc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T05:47:53,397 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64e2e59d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T05:47:53,398 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T05:47:53,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ec581d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T05:47:53,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19f40ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,STOPPED}
2024-12-09T05:47:53,400 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T05:47:53,400 WARN [BP-514775923-172.17.0.2-1733723269939 heartbeating to localhost/127.0.0.1:41783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T05:47:53,400 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T05:47:53,400 WARN [BP-514775923-172.17.0.2-1733723269939 heartbeating to localhost/127.0.0.1:41783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-514775923-172.17.0.2-1733723269939 (Datanode Uuid 46c9fd07-72f0-463d-a850-fa74c75782b8) service to localhost/127.0.0.1:41783
2024-12-09T05:47:53,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data5/current/BP-514775923-172.17.0.2-1733723269939 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T05:47:53,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data6/current/BP-514775923-172.17.0.2-1733723269939 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T05:47:53,401 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T05:47:53,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f02dcba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T05:47:53,404 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15fff4df{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T05:47:53,404 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T05:47:53,404 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ae51624{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T05:47:53,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fee469f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,STOPPED}
2024-12-09T05:47:53,405 WARN [BP-514775923-172.17.0.2-1733723269939 heartbeating to localhost/127.0.0.1:41783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T05:47:53,405 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T05:47:53,406 WARN [BP-514775923-172.17.0.2-1733723269939 heartbeating to localhost/127.0.0.1:41783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-514775923-172.17.0.2-1733723269939 (Datanode Uuid c99d5ef1-5bfa-4d0c-8f19-e83d2dfec4f4) service to localhost/127.0.0.1:41783
2024-12-09T05:47:53,406 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T05:47:53,406 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data3/current/BP-514775923-172.17.0.2-1733723269939 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T05:47:53,406 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data4/current/BP-514775923-172.17.0.2-1733723269939 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T05:47:53,406 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T05:47:53,408 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b7198f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T05:47:53,408 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69996946{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T05:47:53,408 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T05:47:53,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e494f88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T05:47:53,409 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2591ff9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,STOPPED}
2024-12-09T05:47:53,413 WARN [BP-514775923-172.17.0.2-1733723269939 heartbeating to localhost/127.0.0.1:41783 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T05:47:53,413 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T05:47:53,413 WARN [BP-514775923-172.17.0.2-1733723269939 heartbeating to localhost/127.0.0.1:41783 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-514775923-172.17.0.2-1733723269939 (Datanode Uuid cdf4dd8b-003d-4f33-84b6-04f786215bcc) service to localhost/127.0.0.1:41783
2024-12-09T05:47:53,413 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T05:47:53,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data1/current/BP-514775923-172.17.0.2-1733723269939 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T05:47:53,414 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/cluster_0c4d9564-229f-4559-303e-61ce9a000006/data/data2/current/BP-514775923-172.17.0.2-1733723269939 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T05:47:53,414 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T05:47:53,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@46039787{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T05:47:53,420 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7326bb42{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T05:47:53,420 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T05:47:53,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71b7cabb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T05:47:53,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@715f09c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cc54e734-8836-5618-52ba-cdd0210ca1ab/hadoop.log.dir/,STOPPED}
2024-12-09T05:47:53,429 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T05:47:53,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T05:47:53,462 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=150 (was 91) - Thread LEAK? -, OpenFileDescriptor=521 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=380 (was 380), ProcessCount=11 (was 11), AvailableMemoryMB=8503 (was 8665)