2024-12-11 22:37:55,827 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-11 22:37:55,844 main DEBUG Took 0.014053 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-11 22:37:55,844 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-11 22:37:55,845 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-11 22:37:55,846 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-11 22:37:55,848 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,862 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-11 22:37:55,977 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,979 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,980 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,980 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,981 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,981 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,982 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,982 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,983 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,988 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,990 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,990 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,991 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,992 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,993 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:55,995 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:55,996 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:56,002 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:56,003 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:56,004 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:56,006 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:56,009 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:56,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 22:37:56,012 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:56,012 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-11 22:37:56,014 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 22:37:56,016 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-11 22:37:56,019 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-11 22:37:56,020 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-11 22:37:56,022 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-11 22:37:56,022 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-11 22:37:56,034 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-11 22:37:56,037 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-11 22:37:56,040 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-11 22:37:56,040 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-11 22:37:56,041 main DEBUG createAppenders(={Console})
2024-12-11 22:37:56,058 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-12-11 22:37:56,058 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-12-11 22:37:56,058 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-12-11 22:37:56,059 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-11 22:37:56,060 main DEBUG OutputStream closed
2024-12-11 22:37:56,060 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-11 22:37:56,060 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-11 22:37:56,072 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-12-11 22:37:56,225 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-11 22:37:56,235 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-11 22:37:56,244 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-11 22:37:56,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-11 22:37:56,255 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-11 22:37:56,255 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-11 22:37:56,256 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-11 22:37:56,257 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-11 22:37:56,257 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-11 22:37:56,258 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-11 22:37:56,258 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-11 22:37:56,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-11 22:37:56,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-11 22:37:56,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-11 22:37:56,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-11 22:37:56,261 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-11 22:37:56,261 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-11 22:37:56,262 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-11 22:37:56,266 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-11 22:37:56,266 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-12-11 22:37:56,267 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-11 22:37:56,269 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-12-11T22:37:56,295 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-12-11 22:37:56,301 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-11 22:37:56,302 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-11T22:37:56,643 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1
2024-12-11T22:37:56,686 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f, deleteOnExit=true
2024-12-11T22:37:56,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/test.cache.data in system properties and HBase conf
2024-12-11T22:37:56,696 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.tmp.dir in system properties and HBase conf
2024-12-11T22:37:56,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir in system properties and HBase conf
2024-12-11T22:37:56,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-11T22:37:56,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-11T22:37:56,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-11T22:37:56,842 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-11T22:37:56,977 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-11T22:37:56,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-11T22:37:56,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-11T22:37:56,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-11T22:37:56,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-11T22:37:56,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-11T22:37:56,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-11T22:37:56,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-11T22:37:56,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-11T22:37:56,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-11T22:37:56,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/nfs.dump.dir in system properties and HBase conf
2024-12-11T22:37:56,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/java.io.tmpdir in system properties and HBase conf
2024-12-11T22:37:56,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-11T22:37:56,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-11T22:37:57,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-11T22:37:58,528 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-11T22:37:58,654 INFO [Time-limited test {}] log.Log(170): Logging initialized @3641ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-11T22:37:58,785 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T22:37:58,899 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T22:37:58,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T22:37:58,959 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T22:37:58,960 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T22:37:58,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T22:37:58,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b4eb733{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,AVAILABLE}
2024-12-11T22:37:58,994 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c6a701e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T22:37:59,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@753cff0b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/java.io.tmpdir/jetty-localhost-37993-hadoop-hdfs-3_4_1-tests_jar-_-any-1775274849124380185/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-11T22:37:59,277 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78567fa0{HTTP/1.1, (http/1.1)}{localhost:37993}
2024-12-11T22:37:59,278 INFO [Time-limited test {}] server.Server(415): Started @4266ms
2024-12-11T22:37:59,973 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T22:37:59,995 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T22:38:00,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T22:38:00,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T22:38:00,008 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T22:38:00,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26fd7980{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,AVAILABLE}
2024-12-11T22:38:00,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4802e856{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T22:38:00,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68c42837{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/java.io.tmpdir/jetty-localhost-35855-hadoop-hdfs-3_4_1-tests_jar-_-any-6146938738428326003/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T22:38:00,176 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@736038db{HTTP/1.1, (http/1.1)}{localhost:35855}
2024-12-11T22:38:00,176 INFO [Time-limited test {}] server.Server(415): Started @5164ms
2024-12-11T22:38:00,262 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-11T22:38:00,520 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T22:38:00,534 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T22:38:00,560 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T22:38:00,560 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T22:38:00,560 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T22:38:00,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f0232ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,AVAILABLE}
2024-12-11T22:38:00,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31fc7e57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T22:38:00,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@51be63ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/java.io.tmpdir/jetty-localhost-36987-hadoop-hdfs-3_4_1-tests_jar-_-any-7377417650133950109/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T22:38:00,744 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@106158ef{HTTP/1.1, (http/1.1)}{localhost:36987}
2024-12-11T22:38:00,744 INFO [Time-limited test {}] server.Server(415): Started @5732ms
2024-12-11T22:38:00,749 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-11T22:38:00,878 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T22:38:00,895 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T22:38:00,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T22:38:00,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T22:38:00,917 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-11T22:38:00,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77df1a06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,AVAILABLE}
2024-12-11T22:38:00,921 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a4f4410{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T22:38:01,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d005cc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/java.io.tmpdir/jetty-localhost-45915-hadoop-hdfs-3_4_1-tests_jar-_-any-3660156364899506172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T22:38:01,067 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@492d1201{HTTP/1.1, (http/1.1)}{localhost:45915}
2024-12-11T22:38:01,068 INFO [Time-limited test {}] server.Server(415): Started @6056ms
2024-12-11T22:38:01,071 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-11T22:38:01,983 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data2/current/BP-1171211553-172.17.0.2-1733956677892/current, will proceed with Du for space computation calculation,
2024-12-11T22:38:01,987 WARN [Thread-119 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data1/current/BP-1171211553-172.17.0.2-1733956677892/current, will proceed with Du for space computation calculation,
2024-12-11T22:38:01,987 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data3/current/BP-1171211553-172.17.0.2-1733956677892/current, will proceed with Du for space computation calculation,
2024-12-11T22:38:01,991 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data4/current/BP-1171211553-172.17.0.2-1733956677892/current, will proceed with Du for space computation calculation,
2024-12-11T22:38:02,104 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-11T22:38:02,118 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-11T22:38:02,181 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1c376a1e6916c8e with lease ID 0xb7cd0fd3f47c834f: Processing first storage report for DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1 from datanode DatanodeRegistration(127.0.0.1:42667, datanodeUuid=da6ceb27-3dca-4dd5-a0a0-9ca23cb9a791, infoPort=38855, infoSecurePort=0, ipcPort=44827, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892)
2024-12-11T22:38:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1c376a1e6916c8e with lease ID 0xb7cd0fd3f47c834f: from storage DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1 node DatanodeRegistration(127.0.0.1:42667, datanodeUuid=da6ceb27-3dca-4dd5-a0a0-9ca23cb9a791, infoPort=38855, infoSecurePort=0, ipcPort=44827, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-11T22:38:02,184 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x791d0792e208fa2c with lease ID 0xb7cd0fd3f47c8350: Processing first storage report for DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff from datanode DatanodeRegistration(127.0.0.1:40975, datanodeUuid=7c23c8bb-89ba-4b4a-bdb6-4c3aa697535e, infoPort=34717, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892)
2024-12-11T22:38:02,184 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x791d0792e208fa2c with lease ID 0xb7cd0fd3f47c8350: from storage DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff node DatanodeRegistration(127.0.0.1:40975, datanodeUuid=7c23c8bb-89ba-4b4a-bdb6-4c3aa697535e, infoPort=34717, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T22:38:02,184 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1c376a1e6916c8e with lease ID 0xb7cd0fd3f47c834f: Processing first storage report for DS-e8f50bf4-409f-4b6d-8aba-7ff55f1d507f from datanode DatanodeRegistration(127.0.0.1:42667, datanodeUuid=da6ceb27-3dca-4dd5-a0a0-9ca23cb9a791, infoPort=38855, infoSecurePort=0, ipcPort=44827, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892)
2024-12-11T22:38:02,185 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1c376a1e6916c8e with lease ID 0xb7cd0fd3f47c834f: from storage DS-e8f50bf4-409f-4b6d-8aba-7ff55f1d507f node DatanodeRegistration(127.0.0.1:42667, datanodeUuid=da6ceb27-3dca-4dd5-a0a0-9ca23cb9a791, infoPort=38855, infoSecurePort=0, ipcPort=44827, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T22:38:02,191 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x791d0792e208fa2c with lease ID 0xb7cd0fd3f47c8350: Processing first storage report for DS-fc43ced2-b244-4cc1-a519-43e86cfead88 from datanode DatanodeRegistration(127.0.0.1:40975, datanodeUuid=7c23c8bb-89ba-4b4a-bdb6-4c3aa697535e, infoPort=34717, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892)
2024-12-11T22:38:02,192 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x791d0792e208fa2c with lease ID 0xb7cd0fd3f47c8350: from storage DS-fc43ced2-b244-4cc1-a519-43e86cfead88 node DatanodeRegistration(127.0.0.1:40975, datanodeUuid=7c23c8bb-89ba-4b4a-bdb6-4c3aa697535e, infoPort=34717, infoSecurePort=0, ipcPort=44447, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T22:38:02,368 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data5/current/BP-1171211553-172.17.0.2-1733956677892/current, will proceed with Du for space computation calculation,
2024-12-11T22:38:02,376 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data6/current/BP-1171211553-172.17.0.2-1733956677892/current, will proceed with Du for space computation calculation,
2024-12-11T22:38:02,440 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-11T22:38:02,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfbe8050ad89acbd7 with lease ID 0xb7cd0fd3f47c8351: Processing first storage report for DS-e90eca3c-555b-4efd-ab6d-aadeb110df74 from datanode DatanodeRegistration(127.0.0.1:39123, datanodeUuid=9e9aaf16-589e-4193-91f8-2d242950ef52, infoPort=41491, infoSecurePort=0, ipcPort=38873, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892)
2024-12-11T22:38:02,453 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfbe8050ad89acbd7 with lease ID 0xb7cd0fd3f47c8351: from storage DS-e90eca3c-555b-4efd-ab6d-aadeb110df74 node DatanodeRegistration(127.0.0.1:39123, datanodeUuid=9e9aaf16-589e-4193-91f8-2d242950ef52, infoPort=41491, infoSecurePort=0, ipcPort=38873, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-11T22:38:02,453 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfbe8050ad89acbd7 with lease ID 0xb7cd0fd3f47c8351: Processing first storage report for DS-ba5d4b38-12ac-4b2c-8851-f463c4755214 from datanode DatanodeRegistration(127.0.0.1:39123, datanodeUuid=9e9aaf16-589e-4193-91f8-2d242950ef52, infoPort=41491, infoSecurePort=0, ipcPort=38873, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892)
2024-12-11T22:38:02,454 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfbe8050ad89acbd7 with lease ID 0xb7cd0fd3f47c8351: from storage DS-ba5d4b38-12ac-4b2c-8851-f463c4755214 node DatanodeRegistration(127.0.0.1:39123, datanodeUuid=9e9aaf16-589e-4193-91f8-2d242950ef52, infoPort=41491, infoSecurePort=0, ipcPort=38873, storageInfo=lv=-57;cid=testClusterID;nsid=640464079;c=1733956677892), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T22:38:02,556 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1
2024-12-11T22:38:02,684 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-12-11T22:38:02,843 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=153, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=957, ProcessCount=11, AvailableMemoryMB=5488
2024-12-11T22:38:02,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-11T22:38:02,861 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-11T22:38:03,025 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/zookeeper_0, clientPort=50515, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-11T22:38:03,051 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50515
2024-12-11T22:38:03,075 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:03,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:03,290 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T22:38:03,291 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T22:38:03,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:37596 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37596 dst: /127.0.0.1:40975
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T22:38:03,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-11T22:38:03,848 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T22:38:03,862 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049 with version=8
2024-12-11T22:38:03,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/hbase-staging
2024-12-11T22:38:03,994 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-11T22:38:04,340 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b762025f20c5:0 server-side Connection retries=45
2024-12-11T22:38:04,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:04,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:04,360 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T22:38:04,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:04,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T22:38:04,541 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-11T22:38:04,617 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-11T22:38:04,630 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-11T22:38:04,636 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T22:38:04,670 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 32336 (auto-detected)
2024-12-11T22:38:04,671 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-11T22:38:04,707 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45863
2024-12-11T22:38:04,735 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45863 connecting to ZooKeeper ensemble=127.0.0.1:50515
2024-12-11T22:38:04,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458630x0, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-11T22:38:04,825 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45863-0x100cb7d24180000 connected
2024-12-11T22:38:04,900 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:04,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:04,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-11T22:38:04,922 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049, hbase.cluster.distributed=false
2024-12-11T22:38:04,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-11T22:38:04,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45863
2024-12-11T22:38:04,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45863
2024-12-11T22:38:04,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45863
2024-12-11T22:38:04,964 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45863
2024-12-11T22:38:04,964 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45863
2024-12-11T22:38:05,078 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45
2024-12-11T22:38:05,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:05,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:05,080 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T22:38:05,080 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:05,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T22:38:05,083 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-11T22:38:05,085 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T22:38:05,086 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46783
2024-12-11T22:38:05,088 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46783 connecting to ZooKeeper ensemble=127.0.0.1:50515
2024-12-11T22:38:05,089 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:05,093 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:05,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467830x0, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-11T22:38:05,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46783-0x100cb7d24180001 connected
2024-12-11T22:38:05,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-11T22:38:05,116 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-11T22:38:05,124 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-11T22:38:05,128 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-11T22:38:05,135 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-11T22:38:05,139 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46783
2024-12-11T22:38:05,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46783
2024-12-11T22:38:05,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46783
2024-12-11T22:38:05,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46783
2024-12-11T22:38:05,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46783
2024-12-11T22:38:05,162 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45
2024-12-11T22:38:05,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:05,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:05,163 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T22:38:05,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T22:38:05,163 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T22:38:05,164 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-11T22:38:05,164 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T22:38:05,165 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44097
2024-12-11T22:38:05,167 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44097 connecting to ZooKeeper ensemble=127.0.0.1:50515
2024-12-11T22:38:05,168 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:05,172 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T22:38:05,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440970x0, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-11T22:38:05,182 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44097-0x100cb7d24180002 connected
2024-12-11T22:38:05,184 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-11T22:38:05,186 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-11T22:38:05,194 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-11T22:38:05,199 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-11T22:38:05,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-11T22:38:05,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44097
2024-12-11T22:38:05,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44097
2024-12-11T22:38:05,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44097
2024-12-11T22:38:05,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44097
2024-12-11T22:38:05,215 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44097
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44097 2024-12-11T22:38:05,239 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:38:05,240 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:05,240 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:05,240 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:38:05,240 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:05,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:38:05,241 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T22:38:05,241 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:38:05,251 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45281 2024-12-11T22:38:05,253 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45281 connecting to ZooKeeper ensemble=127.0.0.1:50515 2024-12-11T22:38:05,257 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:05,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:05,283 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452810x0, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:38:05,287 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45281-0x100cb7d24180003 connected 2024-12-11T22:38:05,288 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:05,289 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:38:05,291 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T22:38:05,294 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:38:05,301 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:38:05,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45281 2024-12-11T22:38:05,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45281 2024-12-11T22:38:05,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45281 2024-12-11T22:38:05,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45281 2024-12-11T22:38:05,315 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45281 2024-12-11T22:38:05,334 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b762025f20c5:45863 2024-12-11T22:38:05,335 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b762025f20c5,45863,1733956684084 2024-12-11T22:38:05,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,349 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,352 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b762025f20c5,45863,1733956684084 2024-12-11T22:38:05,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:05,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,392 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:05,393 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:05,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,399 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T22:38:05,404 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b762025f20c5,45863,1733956684084 from backup master directory 2024-12-11T22:38:05,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b762025f20c5,45863,1733956684084 2024-12-11T22:38:05,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,517 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:05,518 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T22:38:05,518 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b762025f20c5,45863,1733956684084 2024-12-11T22:38:05,520 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T22:38:05,521 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T22:38:05,600 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/hbase.id] with ID: 191af486-7db3-45e0-8de2-f6b4aa97bbc5 2024-12-11T22:38:05,600 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/.tmp/hbase.id 2024-12-11T22:38:05,624 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:05,625 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:05,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:37626 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37626 dst: /127.0.0.1:40975 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T22:38:05,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-11T22:38:05,661 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T22:38:05,662 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/.tmp/hbase.id]:[hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/hbase.id] 2024-12-11T22:38:05,767 INFO [master/b762025f20c5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:05,776 INFO [master/b762025f20c5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T22:38:05,809 INFO [master/b762025f20c5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 30ms. 2024-12-11T22:38:05,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,844 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:05,879 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:05,879 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:05,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:37654 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37654 dst: /127.0.0.1:40975 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:05,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-11T22:38:05,944 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T22:38:05,972 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:38:05,975 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T22:38:05,985 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:38:06,026 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:06,027 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:06,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:34698 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:42667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34698 dst: /127.0.0.1:42667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:06,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-11T22:38:06,041 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:06,060 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store 2024-12-11T22:38:06,083 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:06,083 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:06,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:34716 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34716 dst: /127.0.0.1:42667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:06,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-11T22:38:06,109 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T22:38:06,115 INFO [master/b762025f20c5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-11T22:38:06,119 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:06,121 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T22:38:06,122 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:06,122 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:06,129 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T22:38:06,129 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:06,129 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T22:38:06,131 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733956686121Disabling compacts and flushes for region at 1733956686121Disabling writes for close at 1733956686129 (+8 ms)Writing region close event to WAL at 1733956686129Closed at 1733956686129 2024-12-11T22:38:06,133 WARN [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/.initializing 2024-12-11T22:38:06,134 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/WALs/b762025f20c5,45863,1733956684084 2024-12-11T22:38:06,145 INFO [master/b762025f20c5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:38:06,166 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C45863%2C1733956684084, suffix=, logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/WALs/b762025f20c5,45863,1733956684084, archiveDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/oldWALs, maxLogs=10 2024-12-11T22:38:06,210 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/WALs/b762025f20c5,45863,1733956684084/b762025f20c5%2C45863%2C1733956684084.1733956686173, exclude list is [], retry=0 2024-12-11T22:38:06,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:06,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39123,DS-e90eca3c-555b-4efd-ab6d-aadeb110df74,DISK] 2024-12-11T22:38:06,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42667,DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1,DISK] 2024-12-11T22:38:06,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40975,DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff,DISK] 2024-12-11T22:38:06,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T22:38:06,288 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/WALs/b762025f20c5,45863,1733956684084/b762025f20c5%2C45863%2C1733956684084.1733956686173 2024-12-11T22:38:06,289 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34717:34717),(127.0.0.1/127.0.0.1:41491:41491),(127.0.0.1/127.0.0.1:38855:38855)] 2024-12-11T22:38:06,290 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:38:06,290 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:06,295 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,296 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T22:38:06,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:06,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:06,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T22:38:06,368 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:06,369 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:06,370 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T22:38:06,373 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:06,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:06,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T22:38:06,379 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:06,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:06,381 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,386 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,388 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,395 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,396 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,400 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T22:38:06,407 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:06,416 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:38:06,417 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70234498, jitterRate=0.046575576066970825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:38:06,430 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733956686309Initializing all the Stores at 1733956686311 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956686312 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956686313 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956686313Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956686313Cleaning up temporary data from old regions at 1733956686396 (+83 ms)Region opened successfully at 1733956686429 (+33 ms) 2024-12-11T22:38:06,432 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T22:38:06,471 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c8f3379, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:06,510 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T22:38:06,523 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T22:38:06,523 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T22:38:06,526 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T22:38:06,527 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T22:38:06,533 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-11T22:38:06,533 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T22:38:06,563 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T22:38:06,572 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T22:38:06,581 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T22:38:06,584 INFO [master/b762025f20c5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T22:38:06,586 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T22:38:06,596 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T22:38:06,599 INFO [master/b762025f20c5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T22:38:06,603 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T22:38:06,614 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T22:38:06,616 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T22:38:06,623 DEBUG [master/b762025f20c5:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T22:38:06,642 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T22:38:06,648 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T22:38:06,656 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,656 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,659 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b762025f20c5,45863,1733956684084, sessionid=0x100cb7d24180000, setting cluster-up flag (Was=false) 2024-12-11T22:38:06,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-11T22:38:06,681 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,706 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T22:38:06,708 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b762025f20c5,45863,1733956684084 2024-12-11T22:38:06,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,723 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:06,748 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T22:38:06,749 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b762025f20c5,45863,1733956684084 2024-12-11T22:38:06,755 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T22:38:06,830 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(746): ClusterId : 191af486-7db3-45e0-8de2-f6b4aa97bbc5 2024-12-11T22:38:06,832 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(746): ClusterId : 191af486-7db3-45e0-8de2-f6b4aa97bbc5 2024-12-11T22:38:06,832 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(746): ClusterId : 191af486-7db3-45e0-8de2-f6b4aa97bbc5 2024-12-11T22:38:06,833 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:38:06,834 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:38:06,835 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:38:06,851 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:06,861 DEBUG [RS:1;b762025f20c5:44097 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:38:06,861 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:38:06,861 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:38:06,861 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:38:06,863 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:38:06,863 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:38:06,864 INFO [master/b762025f20c5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T22:38:06,875 INFO [master/b762025f20c5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T22:38:06,883 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:38:06,884 DEBUG [RS:1;b762025f20c5:44097 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c00cfb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:06,886 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:38:06,887 DEBUG [RS:2;b762025f20c5:45281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a1aafc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:06,888 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:38:06,889 DEBUG [RS:0;b762025f20c5:46783 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@264ce135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:06,900 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b762025f20c5,45863,1733956684084 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T22:38:06,910 DEBUG 
[RS:1;b762025f20c5:44097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b762025f20c5:44097 2024-12-11T22:38:06,910 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b762025f20c5:46783 2024-12-11T22:38:06,913 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b762025f20c5:45281 2024-12-11T22:38:06,916 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:06,917 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:06,917 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:06,917 INFO [RS:1;b762025f20c5:44097 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:38:06,917 INFO [RS:1;b762025f20c5:44097 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:38:06,917 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:06,917 DEBUG [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T22:38:06,918 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b762025f20c5:0, corePoolSize=10, maxPoolSize=10 2024-12-11T22:38:06,918 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:06,918 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:06,918 INFO [RS:2;b762025f20c5:45281 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:38:06,918 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:06,918 INFO [RS:2;b762025f20c5:45281 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:38:06,918 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T22:38:06,919 INFO [RS:0;b762025f20c5:46783 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:38:06,919 INFO [RS:0;b762025f20c5:46783 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:38:06,919 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(832): About to register with Master. 
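Note: each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line is the master pre-building a named, bounded thread pool for one class of work, and "Installed shutdown hook thread" shows a regionserver registering a JVM shutdown hook for cleanup. A rough plain-JDK analogue (not HBase's own ExecutorService/ShutdownHook classes; names here are made up for illustration):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BoundedPoolSketch {
      public static void main(String[] args) {
        // Analogue of "MASTER_OPEN_REGION ... corePoolSize=5, maxPoolSize=5":
        // a fixed-size pool dedicated to one kind of task.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        // Analogue of regionserver.ShutdownHook: run cleanup when the JVM exits.
        Runtime.getRuntime().addShutdownHook(
            new Thread(openRegionPool::shutdown, "Shutdownhook:example"));

        openRegionPool.execute(() -> System.out.println("open-region task running"));
        openRegionPool.shutdown();
      }
    }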
2024-12-11T22:38:06,921 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,45863,1733956684084 with port=44097, startcode=1733956685162 2024-12-11T22:38:06,921 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,45863,1733956684084 with port=45281, startcode=1733956685238 2024-12-11T22:38:06,921 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,45863,1733956684084 with port=46783, startcode=1733956685041 2024-12-11T22:38:06,930 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:06,931 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T22:38:06,931 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733956716931 2024-12-11T22:38:06,933 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T22:38:06,935 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T22:38:06,939 DEBUG [RS:0;b762025f20c5:46783 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:38:06,939 DEBUG [RS:2;b762025f20c5:45281 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:38:06,941 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T22:38:06,942 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T22:38:06,942 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T22:38:06,942 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T22:38:06,947 DEBUG [RS:1;b762025f20c5:44097 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:38:06,951 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:06,951 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T22:38:06,956 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:06,976 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T22:38:06,977 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:06,977 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:06,977 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T22:38:06,978 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T22:38:06,985 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T22:38:06,986 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T22:38:06,993 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733956686987,5,FailOnTimeoutGroup] 2024-12-11T22:38:06,995 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733956686993,5,FailOnTimeoutGroup] 2024-12-11T22:38:06,995 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:06,996 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T22:38:06,997 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:06,998 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,002 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:37678 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37678 dst: /127.0.0.1:40975 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:07,014 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55811, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:38:07,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-11T22:38:07,020 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46537, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:38:07,021 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33653, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:38:07,025 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
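Note: the HMaster line above spells out how to enable the disabled chore: give hbase.regions.recovery.store.file.ref.count a value greater than 0 (the parity-block warnings nearby are just the 3-datanode mini cluster failing to satisfy the RS-3-2 erasure coding policy; the log itself points at 'hdfs ec -verifyClusterSetup' for that). In a real deployment the property goes into hbase-site.xml before startup; a programmatic sketch with the key copied verbatim from the log and an arbitrary example threshold of 3:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key quoted verbatim from the log line above; any value > 0 enables
        // the chore that reopens regions with very high storeFileRefCount.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }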
2024-12-11T22:38:07,027 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45863 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,46783,1733956685041 2024-12-11T22:38:07,027 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T22:38:07,028 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049 2024-12-11T22:38:07,030 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45863 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,46783,1733956685041 2024-12-11T22:38:07,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45863 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,44097,1733956685162 2024-12-11T22:38:07,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45863 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,44097,1733956685162 2024-12-11T22:38:07,050 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45863 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,050 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45863 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,053 DEBUG [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049 2024-12-11T22:38:07,053 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049 2024-12-11T22:38:07,053 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43691 2024-12-11T22:38:07,053 DEBUG [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43691 2024-12-11T22:38:07,053 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:38:07,053 DEBUG [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:38:07,056 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:07,056 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:07,058 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049 2024-12-11T22:38:07,058 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43691 2024-12-11T22:38:07,058 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:38:07,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:07,075 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:34738 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:42667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34738 dst: /127.0.0.1:42667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:07,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-11T22:38:07,115 DEBUG [RS:0;b762025f20c5:46783 {}] zookeeper.ZKUtil(111): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,46783,1733956685041 2024-12-11T22:38:07,115 WARN [RS:0;b762025f20c5:46783 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T22:38:07,115 DEBUG [RS:1;b762025f20c5:44097 {}] zookeeper.ZKUtil(111): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,44097,1733956685162 2024-12-11T22:38:07,116 WARN [RS:1;b762025f20c5:44097 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T22:38:07,116 INFO [RS:0;b762025f20c5:46783 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:38:07,116 INFO [RS:1;b762025f20c5:44097 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:38:07,116 DEBUG [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,44097,1733956685162 2024-12-11T22:38:07,116 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,46783,1733956685041 2024-12-11T22:38:07,117 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,46783,1733956685041] 2024-12-11T22:38:07,117 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,45281,1733956685238] 2024-12-11T22:38:07,117 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,44097,1733956685162] 2024-12-11T22:38:07,120 DEBUG [RS:2;b762025f20c5:45281 {}] zookeeper.ZKUtil(111): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,120 WARN [RS:2;b762025f20c5:45281 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
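Note: the "Environment variable HBASE_ZNODE_FILE not set" warnings come from ZNodeClearer: the start scripts use that file to find and delete a crashed server's ephemeral znode quickly, so leaving it unset lengthens MTTR exactly as the message says. The check itself is just an environment lookup, roughly (the fallback message is copied from the log, the rest is illustrative):

    public class ZNodeFileCheck {
      public static void main(String[] args) {
        // ZNodeClearer consults this variable; the branch bodies are illustrative.
        String znodeFile = System.getenv("HBASE_ZNODE_FILE");
        if (znodeFile == null || znodeFile.isEmpty()) {
          System.out.println("Environment variable HBASE_ZNODE_FILE not set; "
              + "znodes will not be cleared on crash by start scripts (Longer MTTR!)");
        } else {
          System.out.println("Would record the server's znode path in " + znodeFile);
        }
      }
    }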
2024-12-11T22:38:07,120 INFO [RS:2;b762025f20c5:45281 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:38:07,120 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,157 INFO [RS:1;b762025f20c5:44097 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:38:07,160 INFO [RS:0;b762025f20c5:46783 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:38:07,160 INFO [RS:2;b762025f20c5:45281 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:38:07,198 INFO [RS:1;b762025f20c5:44097 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:38:07,203 INFO [RS:2;b762025f20c5:45281 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:38:07,213 INFO [RS:0;b762025f20c5:46783 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:38:07,214 INFO [RS:2;b762025f20c5:45281 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:38:07,214 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,216 INFO [RS:0;b762025f20c5:46783 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:38:07,216 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,216 INFO [RS:1;b762025f20c5:44097 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:38:07,216 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
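Note: every "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" line is a periodic background task being handed to the ChoreService. Functionally this is fixed-rate scheduling; a stripped-down JDK equivalent (not HBase's ScheduledChore/ChoreService API, task body invented for illustration):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService choreService = Executors.newSingleThreadScheduledExecutor();
        // Analogue of "name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS".
        choreService.scheduleAtFixedRate(
            () -> System.out.println("tuning compaction throughput"),
            0, 60_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(5);   // let the first run happen, then stop
        choreService.shutdownNow();
      }
    }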
2024-12-11T22:38:07,218 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:38:07,218 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:38:07,219 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:38:07,225 INFO [RS:2;b762025f20c5:45281 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:38:07,226 INFO [RS:1;b762025f20c5:44097 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:38:07,227 INFO [RS:0;b762025f20c5:46783 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:38:07,228 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,228 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,228 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,228 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,228 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,229 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,229 DEBUG 
[RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,229 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:07,229 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,229 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,229 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,230 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,230 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,230 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,230 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:07,230 DEBUG [RS:0;b762025f20c5:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:07,231 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,231 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,231 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:07,232 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,232 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,232 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,232 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,232 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,233 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,233 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:07,233 DEBUG [RS:1;b762025f20c5:44097 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:07,233 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,233 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:07,233 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,233 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,233 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,234 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,234 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,234 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:07,234 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:07,234 DEBUG [RS:2;b762025f20c5:45281 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:07,251 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,252 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,252 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,252 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
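Note: "CompactionChecker runs every PT1S" prints its interval as an ISO-8601 duration; PT1S is one second, matching the CompactionChecker chore's period=1000 ms below. Decoding such a string is a one-liner in the JDK:

    import java.time.Duration;

    public class CompactionCheckerInterval {
      public static void main(String[] args) {
        Duration interval = Duration.parse("PT1S");       // ISO-8601: one second
        System.out.println(interval.toMillis() + " ms");  // prints "1000 ms"
      }
    }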
2024-12-11T22:38:07,252 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,252 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,46783,1733956685041-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:07,262 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,262 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,262 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,263 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,263 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,263 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956685238-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:07,269 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,269 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,269 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,272 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,272 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,272 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,44097,1733956685162-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:07,302 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:38:07,306 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,44097,1733956685162-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,306 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,306 INFO [RS:1;b762025f20c5:44097 {}] regionserver.Replication(171): b762025f20c5,44097,1733956685162 started 2024-12-11T22:38:07,323 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:38:07,324 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,46783,1733956685041-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-11T22:38:07,324 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,325 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:38:07,325 INFO [RS:0;b762025f20c5:46783 {}] regionserver.Replication(171): b762025f20c5,46783,1733956685041 started 2024-12-11T22:38:07,331 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956685238-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,332 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,332 INFO [RS:2;b762025f20c5:45281 {}] regionserver.Replication(171): b762025f20c5,45281,1733956685238 started 2024-12-11T22:38:07,339 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,345 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,44097,1733956685162, RpcServer on b762025f20c5/172.17.0.2:44097, sessionid=0x100cb7d24180002 2024-12-11T22:38:07,347 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:38:07,347 DEBUG [RS:1;b762025f20c5:44097 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,44097,1733956685162 2024-12-11T22:38:07,350 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,44097,1733956685162' 2024-12-11T22:38:07,351 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:38:07,355 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:38:07,360 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:38:07,360 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:38:07,360 DEBUG [RS:1;b762025f20c5:44097 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,44097,1733956685162 2024-12-11T22:38:07,360 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,44097,1733956685162' 2024-12-11T22:38:07,360 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:38:07,364 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:38:07,364 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T22:38:07,365 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,45281,1733956685238, RpcServer on b762025f20c5/172.17.0.2:45281, sessionid=0x100cb7d24180003 2024-12-11T22:38:07,366 DEBUG [RS:1;b762025f20c5:44097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:38:07,367 INFO [RS:1;b762025f20c5:44097 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:38:07,367 INFO [RS:1;b762025f20c5:44097 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:38:07,367 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:38:07,367 DEBUG [RS:2;b762025f20c5:45281 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,367 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,45281,1733956685238' 2024-12-11T22:38:07,367 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:07,367 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:38:07,369 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,46783,1733956685041, RpcServer on b762025f20c5/172.17.0.2:46783, sessionid=0x100cb7d24180001 2024-12-11T22:38:07,369 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:38:07,369 DEBUG [RS:0;b762025f20c5:46783 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,46783,1733956685041 2024-12-11T22:38:07,369 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,46783,1733956685041' 2024-12-11T22:38:07,369 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:38:07,373 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:38:07,379 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:38:07,383 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:38:07,384 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:38:07,384 DEBUG [RS:0;b762025f20c5:46783 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,46783,1733956685041 2024-12-11T22:38:07,384 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,46783,1733956685041' 2024-12-11T22:38:07,384 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:38:07,387 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(53): 
Procedure flush-table-proc started 2024-12-11T22:38:07,387 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:38:07,387 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:38:07,387 DEBUG [RS:2;b762025f20c5:45281 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,387 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,45281,1733956685238' 2024-12-11T22:38:07,387 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:38:07,392 DEBUG [RS:0;b762025f20c5:46783 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:38:07,392 INFO [RS:0;b762025f20c5:46783 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:38:07,392 INFO [RS:0;b762025f20c5:46783 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:38:07,393 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:38:07,399 DEBUG [RS:2;b762025f20c5:45281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:38:07,399 INFO [RS:2;b762025f20c5:45281 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:38:07,399 INFO [RS:2;b762025f20c5:45281 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:38:07,473 INFO [RS:1;b762025f20c5:44097 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:38:07,476 INFO [RS:1;b762025f20c5:44097 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C44097%2C1733956685162, suffix=, logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,44097,1733956685162, archiveDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs, maxLogs=32 2024-12-11T22:38:07,494 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
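Note: "Quota support disabled" on each regionserver means RPC/space quotas were simply not turned on for this mini cluster. Enabling them is a single boolean switch, normally set in hbase-site.xml before startup; assuming the standard hbase.quota.enabled key (an assumption here, confirm against your version's docs), the programmatic form is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key name; quotas must be enabled cluster-wide, not per client.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quota enabled = " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }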
2024-12-11T22:38:07,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:07,498 INFO [RS:0;b762025f20c5:46783 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:38:07,500 INFO [RS:2;b762025f20c5:45281 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T22:38:07,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T22:38:07,503 INFO [RS:0;b762025f20c5:46783 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C46783%2C1733956685041, suffix=, logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,46783,1733956685041, archiveDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs, maxLogs=32 2024-12-11T22:38:07,504 INFO [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C45281%2C1733956685238, suffix=, logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238, archiveDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs, maxLogs=32 2024-12-11T22:38:07,511 DEBUG [RS:1;b762025f20c5:44097 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,44097,1733956685162/b762025f20c5%2C44097%2C1733956685162.1733956687479, exclude list is [], retry=0 2024-12-11T22:38:07,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T22:38:07,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40975,DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff,DISK] 2024-12-11T22:38:07,538 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:07,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:42667,DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1,DISK] 2024-12-11T22:38:07,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:07,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T22:38:07,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39123,DS-e90eca3c-555b-4efd-ab6d-aadeb110df74,DISK] 2024-12-11T22:38:07,549 DEBUG [RS:2;b762025f20c5:45281 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238/b762025f20c5%2C45281%2C1733956685238.1733956687506, exclude list is [], retry=0 2024-12-11T22:38:07,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T22:38:07,553 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:07,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:07,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T22:38:07,592 DEBUG [RS:0;b762025f20c5:46783 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,46783,1733956685041/b762025f20c5%2C46783%2C1733956685041.1733956687505, exclude list is [], retry=0 2024-12-11T22:38:07,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42667,DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1,DISK] 2024-12-11T22:38:07,597 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T22:38:07,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:07,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39123,DS-e90eca3c-555b-4efd-ab6d-aadeb110df74,DISK] 2024-12-11T22:38:07,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:07,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T22:38:07,600 INFO [RS:1;b762025f20c5:44097 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,44097,1733956685162/b762025f20c5%2C44097%2C1733956685162.1733956687479 2024-12-11T22:38:07,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40975,DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff,DISK] 2024-12-11T22:38:07,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42667,DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1,DISK] 2024-12-11T22:38:07,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40975,DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff,DISK] 2024-12-11T22:38:07,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39123,DS-e90eca3c-555b-4efd-ab6d-aadeb110df74,DISK] 2024-12-11T22:38:07,606 DEBUG [RS:1;b762025f20c5:44097 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:34717:34717),(127.0.0.1/127.0.0.1:38855:38855),(127.0.0.1/127.0.0.1:41491:41491)] 2024-12-11T22:38:07,615 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T22:38:07,616 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:07,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:07,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T22:38:07,630 INFO [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238/b762025f20c5%2C45281%2C1733956685238.1733956687506 2024-12-11T22:38:07,631 DEBUG [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38855:38855),(127.0.0.1/127.0.0.1:41491:41491),(127.0.0.1/127.0.0.1:34717:34717)] 2024-12-11T22:38:07,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740 2024-12-11T22:38:07,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740 2024-12-11T22:38:07,641 INFO [RS:0;b762025f20c5:46783 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,46783,1733956685041/b762025f20c5%2C46783%2C1733956685041.1733956687505 2024-12-11T22:38:07,642 DEBUG [RS:0;b762025f20c5:46783 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34717:34717),(127.0.0.1/127.0.0.1:41491:41491),(127.0.0.1/127.0.0.1:38855:38855)] 2024-12-11T22:38:07,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T22:38:07,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T22:38:07,651 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
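The FlushLargeStoresPolicy entry just above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset for hbase:meta, so the lower bound falls back to the region memstore flush size divided by the number of column families (info, ns, rep_barrier, table), which is where the "(32.0 M)" comes from, assuming the default 128 MB flush size. A minimal sketch of that arithmetic, and of setting the property named in the message explicitly, follows; the 16 MB value and the class name are illustrative assumptions, not part of this test.

```java
// Sketch only: reproduces the fallback arithmetic reported by FlushLargeStoresPolicy
// and shows the configuration property named in that log message.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Default region memstore flush size is 128 MB; hbase:meta has 4 families,
    // hence the 32 MB fallback seen in the log.
    long memstoreFlushSize = 128L * 1024 * 1024;
    int numFamilies = 4;
    System.out.println("fallback lower bound = " + memstoreFlushSize / numFamilies);

    // Making the lower bound explicit instead (16 MB is an illustrative value).
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
  }
}
```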
2024-12-11T22:38:07,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T22:38:07,670 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:38:07,671 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64380848, jitterRate=-0.04065060615539551}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:38:07,674 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733956687498Initializing all the Stores at 1733956687502 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956687502Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956687502Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956687502Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956687502Cleaning up temporary data from old regions at 1733956687650 (+148 ms)Region opened successfully at 1733956687674 (+24 ms) 2024-12-11T22:38:07,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T22:38:07,675 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T22:38:07,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T22:38:07,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T22:38:07,675 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T22:38:07,677 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T22:38:07,677 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733956687675Disabling compacts and flushes for region at 1733956687675Disabling writes for close at 1733956687675Writing 
region close event to WAL at 1733956687676 (+1 ms)Closed at 1733956687677 (+1 ms) 2024-12-11T22:38:07,684 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:07,684 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T22:38:07,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T22:38:07,708 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T22:38:07,714 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T22:38:07,867 DEBUG [b762025f20c5:45863 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T22:38:07,878 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:38:07,887 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:38:07,887 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:38:07,888 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:38:07,888 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:38:07,888 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:38:07,888 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:38:07,888 INFO [b762025f20c5:45863 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:38:07,888 INFO [b762025f20c5:45863 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:38:07,888 INFO [b762025f20c5:45863 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:38:07,888 DEBUG [b762025f20c5:45863 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:38:07,898 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b762025f20c5,45281,1733956685238 2024-12-11T22:38:07,904 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b762025f20c5,45281,1733956685238, state=OPENING 2024-12-11T22:38:07,937 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T22:38:07,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:07,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:07,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:07,959 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:07,959 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:07,959 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:07,962 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:07,963 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:07,964 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T22:38:07,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b762025f20c5,45281,1733956685238}] 2024-12-11T22:38:08,147 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:38:08,150 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60455, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:38:08,166 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T22:38:08,167 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T22:38:08,168 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T22:38:08,172 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C45281%2C1733956685238.meta, suffix=.meta, logDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238, archiveDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs, maxLogs=32 2024-12-11T22:38:08,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-11T22:38:08,179 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-11T22:38:08,194 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238/b762025f20c5%2C45281%2C1733956685238.meta.1733956688174.meta, exclude list is [], retry=0 2024-12-11T22:38:08,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39123,DS-e90eca3c-555b-4efd-ab6d-aadeb110df74,DISK] 2024-12-11T22:38:08,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42667,DS-d2de7773-efa5-4862-942b-c1fc0c1ac5c1,DISK] 2024-12-11T22:38:08,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40975,DS-5cbbc45c-4e55-4dcc-9e06-13f1ee8d22ff,DISK] 2024-12-11T22:38:08,204 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,45281,1733956685238/b762025f20c5%2C45281%2C1733956685238.meta.1733956688174.meta 2024-12-11T22:38:08,205 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41491:41491),(127.0.0.1/127.0.0.1:38855:38855),(127.0.0.1/127.0.0.1:34717:34717)] 2024-12-11T22:38:08,205 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:38:08,206 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T22:38:08,209 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T22:38:08,213 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
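The RS_OPEN_META entries above show the region server loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor (HTD) of hbase:meta before opening the region. Below is a hedged sketch of the same mechanism applied to an ordinary table; the table name "example" and family "cf" are made-up placeholders, while the coprocessor class name is the one from the log.

```java
// Sketch only: declaring a coprocessor in a table descriptor, the mechanism by
// which hbase:meta carries MultiRowMutationEndpoint in its HTD.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))                // placeholder table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // placeholder family
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}
```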
2024-12-11T22:38:08,217 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T22:38:08,218 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:08,218 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T22:38:08,218 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T22:38:08,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T22:38:08,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T22:38:08,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:08,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:08,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T22:38:08,229 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T22:38:08,229 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:08,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:08,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T22:38:08,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T22:38:08,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:08,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:08,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T22:38:08,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T22:38:08,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:08,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
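Each CompactionConfiguration(183) entry above repeats the same store-level compaction settings once per column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000). These appear to correspond to the standard hbase.hstore.compaction.* configuration keys; a minimal sketch setting them on a client Configuration follows, using the logged defaults purely for illustration.

```java
// Sketch only: the compaction parameters reported by CompactionConfiguration,
// expressed through the corresponding configuration keys (values are the logged defaults).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // compaction ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}
```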
2024-12-11T22:38:08,237 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T22:38:08,239 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740 2024-12-11T22:38:08,242 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740 2024-12-11T22:38:08,244 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T22:38:08,244 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T22:38:08,245 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T22:38:08,249 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T22:38:08,251 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72305558, jitterRate=0.07743677496910095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:38:08,251 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T22:38:08,252 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733956688219Writing region info on filesystem at 1733956688219Initializing all the Stores at 1733956688222 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956688222Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956688222Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956688222Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956688223 (+1 ms)Cleaning up temporary data from old regions at 1733956688244 (+21 ms)Running coprocessor post-open hooks at 1733956688251 (+7 ms)Region opened successfully at 1733956688252 (+1 ms) 2024-12-11T22:38:08,259 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733956688139 2024-12-11T22:38:08,278 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T22:38:08,279 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T22:38:08,281 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,45281,1733956685238 2024-12-11T22:38:08,284 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b762025f20c5,45281,1733956685238, state=OPEN 2024-12-11T22:38:08,298 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:08,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:08,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:08,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:08,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:08,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:08,298 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:08,299 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b762025f20c5,45281,1733956685238 2024-12-11T22:38:08,306 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T22:38:08,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b762025f20c5,45281,1733956685238 in 333 msec 2024-12-11T22:38:08,307 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:08,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T22:38:08,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 618 msec 2024-12-11T22:38:08,318 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:08,319 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T22:38:08,346 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:38:08,348 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,45281,1733956685238, seqNum=-1] 2024-12-11T22:38:08,432 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:38:08,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58359, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:38:08,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6780 sec 2024-12-11T22:38:08,484 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733956688484, completionTime=-1 2024-12-11T22:38:08,490 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T22:38:08,490 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
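The ConnectionUtils entries above ("Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]") are the client-side lookup of where hbase:meta is hosted. A hedged sketch of the equivalent lookup through the public client API follows; the ZooKeeper quorum value is a placeholder, the rest is the standard HBase client API.

```java
// Sketch only: looking up the hbase:meta region location through the public client API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}
```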
2024-12-11T22:38:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-11T22:38:08,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-11T22:38:08,546 INFO [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T22:38:08,547 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733956748547 2024-12-11T22:38:08,547 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733956808547 2024-12-11T22:38:08,547 INFO [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 56 msec 2024-12-11T22:38:08,550 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:38:08,561 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45863,1733956684084-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,561 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45863,1733956684084-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,561 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45863,1733956684084-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,567 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b762025f20c5:45863, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,568 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,569 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,578 DEBUG [master/b762025f20c5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T22:38:08,627 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.109sec 2024-12-11T22:38:08,633 INFO [master/b762025f20c5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T22:38:08,635 INFO [master/b762025f20c5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T22:38:08,636 INFO [master/b762025f20c5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
2024-12-11T22:38:08,637 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T22:38:08,637 INFO [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T22:38:08,639 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45863,1733956684084-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:08,639 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45863,1733956684084-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T22:38:08,649 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T22:38:08,650 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T22:38:08,651 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45863,1733956684084-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:08,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@727e1c6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:38:08,659 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T22:38:08,659 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T22:38:08,665 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,45863,-1 for getting cluster id 2024-12-11T22:38:08,668 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:38:08,680 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '191af486-7db3-45e0-8de2-f6b4aa97bbc5' 2024-12-11T22:38:08,684 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:38:08,685 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "191af486-7db3-45e0-8de2-f6b4aa97bbc5" 2024-12-11T22:38:08,685 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ba1ab8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:38:08,685 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,45863,-1] 2024-12-11T22:38:08,688 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:38:08,691 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
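The ClusterIdFetcher/ConnectionRegistryService exchange above is the client learning the cluster id ('191af486-7db3-45e0-8de2-f6b4aa97bbc5') before building its RPC stubs. A rough sketch of reading the same id through the public Admin API follows; the quorum value is a placeholder and the exact API surface may differ between HBase client versions.

```java
// Sketch only: reading the cluster id via the Admin API (assumes an HBase 2.x-style client).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```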
2024-12-11T22:38:08,700 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:38:08,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aa1508d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:38:08,705 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:38:08,715 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,45281,1733956685238, seqNum=-1] 2024-12-11T22:38:08,716 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:38:08,719 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:38:08,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b762025f20c5,45863,1733956684084 2024-12-11T22:38:08,767 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T22:38:08,784 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b762025f20c5,45863,1733956684084 2024-12-11T22:38:08,787 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@e1d7d6 2024-12-11T22:38:08,789 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T22:38:08,796 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T22:38:08,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:38:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T22:38:08,821 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:38:08,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: 
"TestHBaseWalOnEC" procId is: 4 2024-12-11T22:38:08,824 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:08,827 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:38:08,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:08,892 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:08,892 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:08,950 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:47538 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47538 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:08,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-11T22:38:08,987 WARN [PEWorker-4 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:08,996 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 225797fb2ab96972cc0610f5aad22b88, NAME => 'TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049 2024-12-11T22:38:09,038 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:09,038 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:09,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:45970 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45970 dst: /127.0.0.1:42667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T22:38:09,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-11T22:38:09,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:09,456 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T22:38:09,457 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:09,457 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 225797fb2ab96972cc0610f5aad22b88, disabling compactions & flushes 2024-12-11T22:38:09,457 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,458 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,458 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. after waiting 0 ms 2024-12-11T22:38:09,458 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,458 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,458 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 225797fb2ab96972cc0610f5aad22b88: Waiting for close lock at 1733956689457Disabling compacts and flushes for region at 1733956689457Disabling writes for close at 1733956689458 (+1 ms)Writing region close event to WAL at 1733956689458Closed at 1733956689458 2024-12-11T22:38:09,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:09,463 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:38:09,471 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733956689464"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733956689464"}]},"ts":"1733956689464"} 2024-12-11T22:38:09,494 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
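The entries above trace CreateTableProcedure (pid=4) for 'TestHBaseWalOnEC' through its PRE_OPERATION, WRITE_FS_LAYOUT and ADD_TO_META states, triggered by the shell-style request `create 'TestHBaseWalOnEC', {NAME => 'cf'}` logged earlier. A simplified client-side equivalent using the public Admin API is sketched below; the quorum value is a placeholder and the extra table attributes from the original request are omitted.

```java
// Sketch only: a simplified client-side equivalent of the create-table request
// that produced CreateTableProcedure pid=4 above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:2181"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build());
    }
  }
}
```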
2024-12-11T22:38:09,506 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:38:09,512 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733956689506"}]},"ts":"1733956689506"} 2024-12-11T22:38:09,523 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T22:38:09,524 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:38:09,532 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:38:09,532 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:38:09,532 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:38:09,532 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:38:09,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=225797fb2ab96972cc0610f5aad22b88, ASSIGN}] 2024-12-11T22:38:09,542 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=225797fb2ab96972cc0610f5aad22b88, ASSIGN 2024-12-11T22:38:09,548 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=225797fb2ab96972cc0610f5aad22b88, ASSIGN; state=OFFLINE, location=b762025f20c5,46783,1733956685041; forceNewPlan=false, retain=false 2024-12-11T22:38:09,702 INFO [b762025f20c5:45863 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
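The ASSIGN procedure initialized here is completed by the OpenRegionProcedure below; once it finishes, the test blocks until the region is actually assigned before writing (the 'Waiting until all regions of table TestHBaseWalOnEC get assigned' messages further down show HBaseTestingUtil doing exactly that). A minimal sketch, assuming the utility instance that started this mini cluster still exposes waitUntilAllRegionsAssigned:

    // Sketch: block until the table's single region is assigned before issuing puts.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    final class WaitForAssignment {
      static void await(HBaseTestingUtil util) throws Exception {
        // util is assumed to be the HBaseTestingUtil that started the cluster in setUp.
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
      }
    }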
2024-12-11T22:38:09,703 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=225797fb2ab96972cc0610f5aad22b88, regionState=OPENING, regionLocation=b762025f20c5,46783,1733956685041 2024-12-11T22:38:09,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=225797fb2ab96972cc0610f5aad22b88, ASSIGN because future has completed 2024-12-11T22:38:09,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 225797fb2ab96972cc0610f5aad22b88, server=b762025f20c5,46783,1733956685041}] 2024-12-11T22:38:09,867 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:38:09,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60891, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:38:09,877 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,877 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 225797fb2ab96972cc0610f5aad22b88, NAME => 'TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:38:09,878 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,878 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:09,878 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,878 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,882 INFO [StoreOpener-225797fb2ab96972cc0610f5aad22b88-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,885 INFO [StoreOpener-225797fb2ab96972cc0610f5aad22b88-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 225797fb2ab96972cc0610f5aad22b88 columnFamilyName cf 2024-12-11T22:38:09,885 DEBUG [StoreOpener-225797fb2ab96972cc0610f5aad22b88-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:09,887 INFO [StoreOpener-225797fb2ab96972cc0610f5aad22b88-1 {}] regionserver.HStore(327): Store=225797fb2ab96972cc0610f5aad22b88/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:09,888 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,890 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,895 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,896 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,897 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,905 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,916 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:38:09,917 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 225797fb2ab96972cc0610f5aad22b88; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60333378, jitterRate=-0.10096260905265808}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:38:09,918 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:09,918 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 225797fb2ab96972cc0610f5aad22b88: Running coprocessor pre-open hook at 
1733956689878Writing region info on filesystem at 1733956689878Initializing all the Stores at 1733956689881 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956689881Cleaning up temporary data from old regions at 1733956689897 (+16 ms)Running coprocessor post-open hooks at 1733956689918 (+21 ms)Region opened successfully at 1733956689918 2024-12-11T22:38:09,924 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88., pid=6, masterSystemTime=1733956689867 2024-12-11T22:38:09,928 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,928 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:09,930 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=225797fb2ab96972cc0610f5aad22b88, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,46783,1733956685041 2024-12-11T22:38:09,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 225797fb2ab96972cc0610f5aad22b88, server=b762025f20c5,46783,1733956685041 because future has completed 2024-12-11T22:38:09,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T22:38:09,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 225797fb2ab96972cc0610f5aad22b88, server=b762025f20c5,46783,1733956685041 in 233 msec 2024-12-11T22:38:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:09,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T22:38:09,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=225797fb2ab96972cc0610f5aad22b88, ASSIGN in 431 msec 2024-12-11T22:38:09,981 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:38:09,981 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733956689981"}]},"ts":"1733956689981"} 2024-12-11T22:38:09,987 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T22:38:09,993 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:38:10,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 1.1850 sec 2024-12-11T22:38:10,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:10,986 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T22:38:10,986 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T22:38:10,988 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:38:10,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T22:38:10,998 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:38:11,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-11T22:38:11,022 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88., hostname=b762025f20c5,46783,1733956685041, seqNum=2] 2024-12-11T22:38:11,024 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:38:11,032 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47120, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:38:11,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T22:38:11,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T22:38:11,058 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T22:38:11,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:11,062 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T22:38:11,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T22:38:11,163 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775693_1015 (size=32) 2024-12-11T22:38:11,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775692_1015 (size=32) 2024-12-11T22:38:11,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-11T22:38:11,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-11T22:38:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-11T22:38:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-11T22:38:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-11T22:38:11,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-11T22:38:11,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:11,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46783 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T22:38:11,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:11,255 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 225797fb2ab96972cc0610f5aad22b88 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T22:38:11,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/.tmp/cf/8fda734dbb1749d4b4e0e660d68f71df is 36, key is row/cf:cq/1733956691033/Put/seqid=0 2024-12-11T22:38:11,366 WARN [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:11,367 WARN [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:11,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1168523005_22 at /127.0.0.1:36840 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36840 dst: /127.0.0.1:40975 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:11,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-11T22:38:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-11T22:38:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-11T22:38:11,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:11,823 WARN [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:11,827 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/.tmp/cf/8fda734dbb1749d4b4e0e660d68f71df 2024-12-11T22:38:11,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/.tmp/cf/8fda734dbb1749d4b4e0e660d68f71df as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/cf/8fda734dbb1749d4b4e0e660d68f71df 2024-12-11T22:38:11,966 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/cf/8fda734dbb1749d4b4e0e660d68f71df, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T22:38:11,989 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 225797fb2ab96972cc0610f5aad22b88 in 735ms, sequenceid=5, compaction requested=false 2024-12-11T22:38:11,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-11T22:38:12,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 225797fb2ab96972cc0610f5aad22b88: 2024-12-11T22:38:12,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 
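The flush above completes despite the repeated parity-allocation warnings: with RS-3-2 the writer only needs its 3 data streamers to close the file, so the two missing parity blocks cost redundancy (the 'high risk of losing data' message) without failing the write. The 32 B memstore is written to a temporary HFile under .tmp/cf/, committed into cf/, and recorded in the flush journal, all driven by FlushTableProcedure (pid=7) fanning out FlushRegionProcedure (pid=8) to the hosting region server. From the client side this is just a put plus an Admin flush; a hedged sketch matching the row/cf:cq key seen in the HFile writer line, with the Connection and the value bytes assumed:

    // Sketch: write the single cell seen in the log (row/cf:cq) and trigger a table flush.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutAndFlush {
      static void putAndFlush(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(name); // surfaces as FlushTableProcedure pid=7 in the master log
        }
      }
    }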
2024-12-11T22:38:12,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T22:38:12,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T22:38:12,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T22:38:12,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 955 msec 2024-12-11T22:38:12,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 984 msec 2024-12-11T22:38:12,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45863 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:12,203 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T22:38:12,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T22:38:12,223 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T22:38:12,224 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:12,231 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,233 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:38:12,233 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T22:38:12,233 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=570673981, stopped=false 2024-12-11T22:38:12,234 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b762025f20c5,45863,1733956684084 2024-12-11T22:38:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:12,240 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:12,240 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, 
quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:12,240 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T22:38:12,241 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T22:38:12,241 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:12,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,241 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:12,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:12,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:12,243 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:12,245 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,46783,1733956685041' ***** 2024-12-11T22:38:12,245 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T22:38:12,245 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,44097,1733956685162' ***** 2024-12-11T22:38:12,245 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T22:38:12,245 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,45281,1733956685238' ***** 2024-12-11T22:38:12,245 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T22:38:12,245 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T22:38:12,245 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T22:38:12,246 INFO [RS:1;b762025f20c5:44097 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T22:38:12,246 INFO [RS:2;b762025f20c5:45281 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-11T22:38:12,246 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T22:38:12,246 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T22:38:12,246 INFO [RS:1;b762025f20c5:44097 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T22:38:12,246 INFO [RS:2;b762025f20c5:45281 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T22:38:12,246 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,44097,1733956685162 2024-12-11T22:38:12,246 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,45281,1733956685238 2024-12-11T22:38:12,246 INFO [RS:1;b762025f20c5:44097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:12,246 INFO [RS:2;b762025f20c5:45281 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:12,246 INFO [RS:2;b762025f20c5:45281 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b762025f20c5:45281. 2024-12-11T22:38:12,246 INFO [RS:1;b762025f20c5:44097 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b762025f20c5:44097. 2024-12-11T22:38:12,246 DEBUG [RS:1;b762025f20c5:44097 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:12,246 DEBUG [RS:2;b762025f20c5:45281 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:12,246 DEBUG [RS:1;b762025f20c5:44097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,246 DEBUG [RS:2;b762025f20c5:45281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,246 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,44097,1733956685162; all regions closed. 2024-12-11T22:38:12,247 INFO [RS:2;b762025f20c5:45281 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T22:38:12,249 INFO [RS:2;b762025f20c5:45281 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T22:38:12,249 INFO [RS:2;b762025f20c5:45281 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T22:38:12,250 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T22:38:12,250 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T22:38:12,250 INFO [RS:0;b762025f20c5:46783 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T22:38:12,250 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T22:38:12,250 INFO [RS:0;b762025f20c5:46783 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-11T22:38:12,250 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(3091): Received CLOSE for 225797fb2ab96972cc0610f5aad22b88 2024-12-11T22:38:12,253 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T22:38:12,253 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T22:38:12,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741826_1016 (size=93) 2024-12-11T22:38:12,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741826_1016 (size=93) 2024-12-11T22:38:12,259 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T22:38:12,260 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T22:38:12,260 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,46783,1733956685041 2024-12-11T22:38:12,260 INFO [RS:0;b762025f20c5:46783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:12,260 INFO [RS:0;b762025f20c5:46783 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b762025f20c5:46783. 2024-12-11T22:38:12,260 DEBUG [RS:0;b762025f20c5:46783 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:12,260 DEBUG [RS:0;b762025f20c5:46783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,260 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T22:38:12,260 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T22:38:12,260 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1325): Online Regions={225797fb2ab96972cc0610f5aad22b88=TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88.} 2024-12-11T22:38:12,261 DEBUG [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1351): Waiting on 225797fb2ab96972cc0610f5aad22b88 
2024-12-11T22:38:12,261 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T22:38:12,261 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T22:38:12,261 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T22:38:12,261 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T22:38:12,261 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T22:38:12,261 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 225797fb2ab96972cc0610f5aad22b88, disabling compactions & flushes 2024-12-11T22:38:12,261 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:12,261 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:12,262 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. after waiting 1 ms 2024-12-11T22:38:12,262 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T22:38:12,262 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 
2024-12-11T22:38:12,262 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T22:38:12,262 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T22:38:12,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_1073741826_1016 (size=93) 2024-12-11T22:38:12,270 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T22:38:12,270 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T22:38:12,281 DEBUG [RS:1;b762025f20c5:44097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs 2024-12-11T22:38:12,281 INFO [RS:1;b762025f20c5:44097 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C44097%2C1733956685162:(num 1733956687479) 2024-12-11T22:38:12,281 DEBUG [RS:1;b762025f20c5:44097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,281 INFO [RS:1;b762025f20c5:44097 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:12,281 INFO [RS:1;b762025f20c5:44097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:12,282 INFO [RS:1;b762025f20c5:44097 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:12,282 INFO [RS:1;b762025f20c5:44097 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T22:38:12,283 INFO [RS:1;b762025f20c5:44097 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T22:38:12,283 INFO [RS:1;b762025f20c5:44097 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T22:38:12,283 INFO [RS:1;b762025f20c5:44097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:12,283 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T22:38:12,283 INFO [RS:1;b762025f20c5:44097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44097 2024-12-11T22:38:12,300 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:12,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,44097,1733956685162 2024-12-11T22:38:12,300 INFO [RS:1;b762025f20c5:44097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:12,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:12,303 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:12,305 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:12,307 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/default/TestHBaseWalOnEC/225797fb2ab96972cc0610f5aad22b88/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T22:38:12,307 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/info/1201fb402a8d437e8e302375effac878 is 153, key is TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88./info:regioninfo/1733956689929/Put/seqid=0 2024-12-11T22:38:12,308 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:12,308 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 225797fb2ab96972cc0610f5aad22b88: Waiting for close lock at 1733956692261Running coprocessor pre-close hooks at 1733956692261Disabling compacts and flushes for region at 1733956692261Disabling writes for close at 1733956692262 (+1 ms)Writing region close event to WAL at 1733956692268 (+6 ms)Running coprocessor post-close hooks at 1733956692308 (+40 ms)Closed at 1733956692308 2024-12-11T22:38:12,309 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733956688798.225797fb2ab96972cc0610f5aad22b88. 2024-12-11T22:38:12,311 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:12,312 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:12,320 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,44097,1733956685162] 2024-12-11T22:38:12,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413448402_22 at /127.0.0.1:47610 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47610 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:12,339 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,44097,1733956685162 already deleted, retry=false 2024-12-11T22:38:12,339 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,44097,1733956685162 expired; onlineServers=2 2024-12-11T22:38:12,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-11T22:38:12,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:12,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44097-0x100cb7d24180002, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:12,418 INFO [RS:1;b762025f20c5:44097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:12,418 INFO [RS:1;b762025f20c5:44097 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,44097,1733956685162; zookeeper connection closed. 
2024-12-11T22:38:12,418 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6e52c61d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6e52c61d 2024-12-11T22:38:12,460 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T22:38:12,461 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,46783,1733956685041; all regions closed. 2024-12-11T22:38:12,472 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/WALs/b762025f20c5,46783,1733956685041/b762025f20c5%2C46783%2C1733956685041.1733956687505 not finished, retry = 0 2024-12-11T22:38:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741828_1018 (size=1298) 2024-12-11T22:38:12,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_1073741828_1018 (size=1298) 2024-12-11T22:38:12,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741828_1018 (size=1298) 2024-12-11T22:38:12,577 DEBUG [RS:0;b762025f20c5:46783 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs 2024-12-11T22:38:12,577 INFO [RS:0;b762025f20c5:46783 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C46783%2C1733956685041:(num 1733956687505) 2024-12-11T22:38:12,577 DEBUG [RS:0;b762025f20c5:46783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:12,577 INFO [RS:0;b762025f20c5:46783 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:12,577 INFO [RS:0;b762025f20c5:46783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:12,577 INFO [RS:0;b762025f20c5:46783 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:12,578 INFO [RS:0;b762025f20c5:46783 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T22:38:12,578 INFO [RS:0;b762025f20c5:46783 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T22:38:12,578 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T22:38:12,578 INFO [RS:0;b762025f20c5:46783 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T22:38:12,578 INFO [RS:0;b762025f20c5:46783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:12,578 INFO [RS:0;b762025f20c5:46783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46783 2024-12-11T22:38:12,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,46783,1733956685041 2024-12-11T22:38:12,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:12,589 INFO [RS:0;b762025f20c5:46783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:12,598 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,46783,1733956685041] 2024-12-11T22:38:12,606 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,46783,1733956685041 already deleted, retry=false 2024-12-11T22:38:12,606 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,46783,1733956685041 expired; onlineServers=1 2024-12-11T22:38:12,661 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T22:38:12,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:12,698 INFO [RS:0;b762025f20c5:46783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:12,699 INFO [RS:0;b762025f20c5:46783 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,46783,1733956685041; zookeeper connection closed. 2024-12-11T22:38:12,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x100cb7d24180001, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:12,699 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@190d983e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@190d983e 2024-12-11T22:38:12,761 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:12,761 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/info/1201fb402a8d437e8e302375effac878 2024-12-11T22:38:12,797 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/ns/0a003f34ab0b41b5a30659061eba9e18 is 43, key is default/ns:d/1733956688442/Put/seqid=0 2024-12-11T22:38:12,800 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:12,800 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:12,804 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413448402_22 at /127.0.0.1:47620 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47620 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-11T22:38:12,809 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:12,810 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/ns/0a003f34ab0b41b5a30659061eba9e18 2024-12-11T22:38:12,861 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/table/52f3e9648ced4cc4818b7718460a61de is 52, key is TestHBaseWalOnEC/table:state/1733956689981/Put/seqid=0 2024-12-11T22:38:12,861 DEBUG [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T22:38:12,863 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:12,864 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:12,874 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-413448402_22 at /127.0.0.1:47634 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47634 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T22:38:12,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-11T22:38:12,883 WARN [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T22:38:12,884 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/table/52f3e9648ced4cc4818b7718460a61de 2024-12-11T22:38:12,898 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/info/1201fb402a8d437e8e302375effac878 as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/info/1201fb402a8d437e8e302375effac878 2024-12-11T22:38:12,913 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/info/1201fb402a8d437e8e302375effac878, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T22:38:12,915 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/ns/0a003f34ab0b41b5a30659061eba9e18 as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/ns/0a003f34ab0b41b5a30659061eba9e18 2024-12-11T22:38:12,936 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/ns/0a003f34ab0b41b5a30659061eba9e18, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T22:38:12,938 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/.tmp/table/52f3e9648ced4cc4818b7718460a61de as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/table/52f3e9648ced4cc4818b7718460a61de 2024-12-11T22:38:12,954 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/table/52f3e9648ced4cc4818b7718460a61de, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T22:38:12,960 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 698ms, sequenceid=11, compaction requested=false 2024-12-11T22:38:12,960 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T22:38:13,011 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T22:38:13,013 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T22:38:13,013 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T22:38:13,014 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733956692260Running coprocessor pre-close hooks at 1733956692261 (+1 ms)Disabling compacts and flushes for region at 1733956692261Disabling writes for close at 1733956692261Obtaining lock to block concurrent updates at 1733956692262 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733956692262Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733956692263 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733956692265 (+2 ms)Flushing 1588230740/info: creating writer at 1733956692265Flushing 1588230740/info: appending metadata at 1733956692303 (+38 ms)Flushing 1588230740/info: closing flushed file at 1733956692303Flushing 1588230740/ns: creating writer at 1733956692777 (+474 ms)Flushing 1588230740/ns: appending metadata at 1733956692796 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733956692796Flushing 1588230740/table: creating writer at 1733956692823 (+27 ms)Flushing 1588230740/table: appending metadata at 1733956692860 (+37 ms)Flushing 1588230740/table: closing flushed file at 1733956692860Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55426320: reopening flushed file at 1733956692896 (+36 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a7dec7f: reopening flushed file at 1733956692913 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7093d636: reopening flushed file at 1733956692936 (+23 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 698ms, sequenceid=11, compaction requested=false at 1733956692960 (+24 ms)Writing region close event to WAL at 1733956692970 (+10 ms)Running coprocessor post-close hooks at 1733956693013 (+43 ms)Closed at 1733956693013 2024-12-11T22:38:13,014 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T22:38:13,061 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,45281,1733956685238; all regions closed. 
2024-12-11T22:38:13,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741829_1019 (size=2751) 2024-12-11T22:38:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741829_1019 (size=2751) 2024-12-11T22:38:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_1073741829_1019 (size=2751) 2024-12-11T22:38:13,083 DEBUG [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs 2024-12-11T22:38:13,083 INFO [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C45281%2C1733956685238.meta:.meta(num 1733956688174) 2024-12-11T22:38:13,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741827_1017 (size=93) 2024-12-11T22:38:13,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741827_1017 (size=93) 2024-12-11T22:38:13,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_1073741827_1017 (size=93) 2024-12-11T22:38:13,120 DEBUG [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/oldWALs 2024-12-11T22:38:13,120 INFO [RS:2;b762025f20c5:45281 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b762025f20c5%2C45281%2C1733956685238:(num 1733956687506) 2024-12-11T22:38:13,120 DEBUG [RS:2;b762025f20c5:45281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:13,120 INFO [RS:2;b762025f20c5:45281 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:13,120 INFO [RS:2;b762025f20c5:45281 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:13,121 INFO [RS:2;b762025f20c5:45281 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:13,121 INFO [RS:2;b762025f20c5:45281 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:13,121 INFO [RS:2;b762025f20c5:45281 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45281 2024-12-11T22:38:13,121 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T22:38:13,131 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,45281,1733956685238 2024-12-11T22:38:13,131 INFO [RS:2;b762025f20c5:45281 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:13,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:13,148 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,45281,1733956685238] 2024-12-11T22:38:13,156 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,45281,1733956685238 already deleted, retry=false 2024-12-11T22:38:13,156 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,45281,1733956685238 expired; onlineServers=0 2024-12-11T22:38:13,156 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b762025f20c5,45863,1733956684084' ***** 2024-12-11T22:38:13,156 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T22:38:13,156 INFO [M:0;b762025f20c5:45863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:13,156 INFO [M:0;b762025f20c5:45863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:13,157 DEBUG [M:0;b762025f20c5:45863 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T22:38:13,157 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T22:38:13,157 DEBUG [M:0;b762025f20c5:45863 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T22:38:13,157 DEBUG [master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733956686993 {}] cleaner.HFileCleaner(306): Exit Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733956686993,5,FailOnTimeoutGroup] 2024-12-11T22:38:13,157 INFO [M:0;b762025f20c5:45863 {}] hbase.ChoreService(370): Chore service for: master/b762025f20c5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:13,157 INFO [M:0;b762025f20c5:45863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:13,157 DEBUG [M:0;b762025f20c5:45863 {}] master.HMaster(1795): Stopping service threads 2024-12-11T22:38:13,157 INFO [M:0;b762025f20c5:45863 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T22:38:13,157 INFO [M:0;b762025f20c5:45863 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T22:38:13,158 INFO [M:0;b762025f20c5:45863 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T22:38:13,158 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-11T22:38:13,159 DEBUG [master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733956686987 {}] cleaner.HFileCleaner(306): Exit Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733956686987,5,FailOnTimeoutGroup] 2024-12-11T22:38:13,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:13,164 DEBUG [M:0;b762025f20c5:45863 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-11T22:38:13,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:13,165 DEBUG [M:0;b762025f20c5:45863 {}] master.ActiveMasterManager(353): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-11T22:38:13,166 INFO [M:0;b762025f20c5:45863 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/.lastflushedseqids 2024-12-11T22:38:13,193 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:13,193 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:13,218 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:47662 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47662 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:13,243 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:13,243 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45281-0x100cb7d24180003, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:13,243 INFO [RS:2;b762025f20c5:45281 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:13,244 INFO [RS:2;b762025f20c5:45281 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,45281,1733956685238; zookeeper connection closed. 2024-12-11T22:38:13,251 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@81d4bbf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@81d4bbf 2024-12-11T22:38:13,252 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T22:38:13,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-11T22:38:13,272 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T22:38:13,272 INFO [M:0;b762025f20c5:45863 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T22:38:13,275 INFO [M:0;b762025f20c5:45863 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T22:38:13,275 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T22:38:13,276 INFO [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:13,276 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:13,276 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T22:38:13,276 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:13,276 INFO [M:0;b762025f20c5:45863 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-11T22:38:13,322 DEBUG [M:0;b762025f20c5:45863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8c2d16dbcdb4ad09cb88be0a8e9dca4 is 82, key is hbase:meta,,1/info:regioninfo/1733956688280/Put/seqid=0 2024-12-11T22:38:13,326 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:13,326 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:13,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:47680 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47680 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:13,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-11T22:38:13,738 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T22:38:13,754 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:13,755 INFO [M:0;b762025f20c5:45863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8c2d16dbcdb4ad09cb88be0a8e9dca4 2024-12-11T22:38:13,830 DEBUG [M:0;b762025f20c5:45863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4db27645df3c4684965954af1978b2e2 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733956690006/Put/seqid=0 2024-12-11T22:38:13,841 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:13,841 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:13,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:36854 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36854 dst: /127.0.0.1:40975 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:13,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-11T22:38:13,944 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:13,947 INFO [M:0;b762025f20c5:45863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4db27645df3c4684965954af1978b2e2 2024-12-11T22:38:14,060 DEBUG [M:0;b762025f20c5:45863 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fcfb8bcc868b49d7a73658ae77959096 is 69, key is b762025f20c5,44097,1733956685162/rs:state/1733956687046/Put/seqid=0 2024-12-11T22:38:14,063 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:14,063 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T22:38:14,091 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1841870012_22 at /127.0.0.1:47698 [Receiving block BP-1171211553-172.17.0.2-1733956677892:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39123:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47698 dst: /127.0.0.1:39123 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T22:38:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-11T22:38:14,118 WARN [M:0;b762025f20c5:45863 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T22:38:14,118 INFO [M:0;b762025f20c5:45863 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fcfb8bcc868b49d7a73658ae77959096 2024-12-11T22:38:14,145 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a8c2d16dbcdb4ad09cb88be0a8e9dca4 as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8c2d16dbcdb4ad09cb88be0a8e9dca4 2024-12-11T22:38:14,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_-9223372036854775677_1021 (size=392) 2024-12-11T22:38:14,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775676_1021 (size=392) 2024-12-11T22:38:14,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_-9223372036854775645_1025 (size=4787) 2024-12-11T22:38:14,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_-9223372036854775644_1025 (size=4787) 2024-12-11T22:38:14,179 INFO [M:0;b762025f20c5:45863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a8c2d16dbcdb4ad09cb88be0a8e9dca4, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T22:38:14,181 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4db27645df3c4684965954af1978b2e2 as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4db27645df3c4684965954af1978b2e2 2024-12-11T22:38:14,212 INFO [M:0;b762025f20c5:45863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4db27645df3c4684965954af1978b2e2, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T22:38:14,215 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fcfb8bcc868b49d7a73658ae77959096 as hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fcfb8bcc868b49d7a73658ae77959096 2024-12-11T22:38:14,230 INFO [M:0;b762025f20c5:45863 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fcfb8bcc868b49d7a73658ae77959096, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T22:38:14,240 INFO 
[M:0;b762025f20c5:45863 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 964ms, sequenceid=72, compaction requested=false 2024-12-11T22:38:14,259 INFO [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:14,259 DEBUG [M:0;b762025f20c5:45863 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733956693275Disabling compacts and flushes for region at 1733956693275Disabling writes for close at 1733956693276 (+1 ms)Obtaining lock to block concurrent updates at 1733956693276Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733956693276Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733956693277 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733956693281 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733956693281Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733956693322 (+41 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733956693322Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733956693767 (+445 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733956693829 (+62 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733956693830 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733956693989 (+159 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733956694059 (+70 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733956694059Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55663660: reopening flushed file at 1733956694142 (+83 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@465d9444: reopening flushed file at 1733956694179 (+37 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6878f07e: reopening flushed file at 1733956694213 (+34 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 964ms, sequenceid=72, compaction requested=false at 1733956694240 (+27 ms)Writing region close event to WAL at 1733956694258 (+18 ms)Closed at 1733956694259 (+1 ms) 2024-12-11T22:38:14,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741825_1011 (size=32674) 2024-12-11T22:38:14,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39123 is added to blk_1073741825_1011 (size=32674) 2024-12-11T22:38:14,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42667 is added to blk_1073741825_1011 (size=32674) 2024-12-11T22:38:14,300 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T22:38:14,300 INFO [M:0;b762025f20c5:45863 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-11T22:38:14,300 INFO [M:0;b762025f20c5:45863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45863 2024-12-11T22:38:14,300 INFO [M:0;b762025f20c5:45863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:14,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:14,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45863-0x100cb7d24180000, quorum=127.0.0.1:50515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:14,421 INFO [M:0;b762025f20c5:45863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:14,447 WARN [BP-1171211553-172.17.0.2-1733956677892 heartbeating to localhost/127.0.0.1:43691 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1171211553-172.17.0.2-1733956677892 (Datanode Uuid 9e9aaf16-589e-4193-91f8-2d242950ef52) service to localhost/127.0.0.1:43691 2024-12-11T22:38:14,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data5/current/BP-1171211553-172.17.0.2-1733956677892 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:14,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data6/current/BP-1171211553-172.17.0.2-1733956677892 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:14,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d005cc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:14,467 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@492d1201{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T22:38:14,467 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T22:38:14,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a4f4410{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T22:38:14,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77df1a06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,STOPPED} 2024-12-11T22:38:14,493 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T22:38:14,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@51be63ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:14,517 INFO [Time-limited 
test {}] server.AbstractConnector(383): Stopped ServerConnector@106158ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T22:38:14,517 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T22:38:14,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31fc7e57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T22:38:14,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f0232ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,STOPPED} 2024-12-11T22:38:14,529 WARN [BP-1171211553-172.17.0.2-1733956677892 heartbeating to localhost/127.0.0.1:43691 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T22:38:14,529 WARN [BP-1171211553-172.17.0.2-1733956677892 heartbeating to localhost/127.0.0.1:43691 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1171211553-172.17.0.2-1733956677892 (Datanode Uuid 7c23c8bb-89ba-4b4a-bdb6-4c3aa697535e) service to localhost/127.0.0.1:43691 2024-12-11T22:38:14,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data3/current/BP-1171211553-172.17.0.2-1733956677892 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:14,530 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T22:38:14,530 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T22:38:14,530 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data4/current/BP-1171211553-172.17.0.2-1733956677892 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:14,532 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T22:38:14,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68c42837{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:14,551 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@736038db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T22:38:14,551 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T22:38:14,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4802e856{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T22:38:14,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26fd7980{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,STOPPED} 2024-12-11T22:38:14,556 WARN [BP-1171211553-172.17.0.2-1733956677892 heartbeating to localhost/127.0.0.1:43691 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T22:38:14,556 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
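[Annotation] The entries above and below bracket one parameterized run of TestHBaseWalOnEC#testReadWrite: the minicluster from run [0] (master, region servers, HDFS datanodes, MiniZooKeeperCluster) is torn down, ResourceChecker then compares thread and file-descriptor counts before and after, and run [1] brings up a fresh cluster. As a rough sketch of the test-side API that produces these "Starting up minicluster" / "Minicluster is down" lines, assuming the HBaseTestingUtil and StartMiniClusterOption classes named in the log and builder methods matching the option fields it prints:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Test utility behind the hbase.HBaseTestingUtil entries in this log.
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option dump printed below:
    // numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // "STARTING DFS", Jetty/datanode/ZK startup entries
    try {
      // ... test body runs here (TestHBaseWalOnEC#testReadWrite in this log) ...
    } finally {
      util.shutdownMiniCluster();    // "Minicluster is down"
    }
  }
}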
2024-12-11T22:38:14,556 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T22:38:14,556 WARN [BP-1171211553-172.17.0.2-1733956677892 heartbeating to localhost/127.0.0.1:43691 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1171211553-172.17.0.2-1733956677892 (Datanode Uuid da6ceb27-3dca-4dd5-a0a0-9ca23cb9a791) service to localhost/127.0.0.1:43691 2024-12-11T22:38:14,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data2/current/BP-1171211553-172.17.0.2-1733956677892 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:14,558 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/cluster_ba01a385-213d-0db1-213d-6060656b1e3f/data/data1/current/BP-1171211553-172.17.0.2-1733956677892 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:14,558 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T22:38:14,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@753cff0b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T22:38:14,588 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78567fa0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T22:38:14,588 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T22:38:14,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c6a701e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T22:38:14,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b4eb733{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir/,STOPPED} 2024-12-11T22:38:14,607 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T22:38:14,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T22:38:14,656 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 153), OpenFileDescriptor=450 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=972 (was 957) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6041 (was 5488) - AvailableMemoryMB LEAK? 
- 2024-12-11T22:38:14,667 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=972, ProcessCount=11, AvailableMemoryMB=6041 2024-12-11T22:38:14,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T22:38:14,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.log.dir so I do NOT create it in target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04 2024-12-11T22:38:14,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d8e8f4b1-6c7b-c80b-496c-e00d9a023fb1/hadoop.tmp.dir so I do NOT create it in target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04 2024-12-11T22:38:14,667 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641, deleteOnExit=true 2024-12-11T22:38:14,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T22:38:14,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/test.cache.data in system properties and HBase conf 2024-12-11T22:38:14,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T22:38:14,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir in system properties and HBase conf 2024-12-11T22:38:14,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T22:38:14,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T22:38:14,668 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T22:38:14,668 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T22:38:14,669 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/nfs.dump.dir in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/java.io.tmpdir in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T22:38:14,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T22:38:14,956 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:38:14,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:38:14,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:38:14,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:38:14,987 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:38:14,988 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:38:14,991 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7288ff9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:38:14,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@94f688b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:38:15,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64064f3d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/java.io.tmpdir/jetty-localhost-33835-hadoop-hdfs-3_4_1-tests_jar-_-any-12988554966012911954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T22:38:15,125 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@485e3231{HTTP/1.1, (http/1.1)}{localhost:33835} 2024-12-11T22:38:15,125 INFO [Time-limited test {}] server.Server(415): Started @20113ms 2024-12-11T22:38:15,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:38:15,349 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:38:15,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:38:15,351 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:38:15,351 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:38:15,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22ce3607{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:38:15,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d0b4a63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:38:15,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@787fca0d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/java.io.tmpdir/jetty-localhost-35923-hadoop-hdfs-3_4_1-tests_jar-_-any-870522594102761825/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:15,479 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@7e93ef7e{HTTP/1.1, (http/1.1)}{localhost:35923} 2024-12-11T22:38:15,479 INFO [Time-limited test {}] server.Server(415): Started @20467ms 2024-12-11T22:38:15,481 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T22:38:15,578 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:38:15,582 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:38:15,583 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:38:15,583 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:38:15,583 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:38:15,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47e3d455{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:38:15,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c414977{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:38:15,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@783cf8b9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/java.io.tmpdir/jetty-localhost-38989-hadoop-hdfs-3_4_1-tests_jar-_-any-8777393703723060519/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:15,709 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b98fb40{HTTP/1.1, (http/1.1)}{localhost:38989} 2024-12-11T22:38:15,710 INFO [Time-limited test {}] server.Server(415): Started @20698ms 2024-12-11T22:38:15,716 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T22:38:15,791 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T22:38:15,796 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T22:38:15,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T22:38:15,797 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T22:38:15,798 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T22:38:15,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c3e0b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,AVAILABLE} 2024-12-11T22:38:15,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a8aabef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T22:38:15,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@309408a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/java.io.tmpdir/jetty-localhost-34003-hadoop-hdfs-3_4_1-tests_jar-_-any-2220046766489689859/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:15,919 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e780d74{HTTP/1.1, (http/1.1)}{localhost:34003} 2024-12-11T22:38:15,919 INFO [Time-limited test {}] server.Server(415): Started @20907ms 2024-12-11T22:38:15,926 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T22:38:16,662 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data1/current/BP-2052876916-172.17.0.2-1733956694700/current, will proceed with Du for space computation calculation, 2024-12-11T22:38:16,662 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data2/current/BP-2052876916-172.17.0.2-1733956694700/current, will proceed with Du for space computation calculation, 2024-12-11T22:38:16,692 WARN [Thread-516 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T22:38:16,696 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad6e8681cc564304 with lease ID 0xa9c33e918502d294: Processing first storage report for DS-4a493267-32d5-40f3-b70c-c301e6bfc6de from datanode DatanodeRegistration(127.0.0.1:39871, datanodeUuid=b9ea5724-f543-4ad3-840b-49b6c8a6c5b8, infoPort=36457, infoSecurePort=0, ipcPort=34731, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700) 2024-12-11T22:38:16,697 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad6e8681cc564304 with lease ID 0xa9c33e918502d294: from storage DS-4a493267-32d5-40f3-b70c-c301e6bfc6de node DatanodeRegistration(127.0.0.1:39871, datanodeUuid=b9ea5724-f543-4ad3-840b-49b6c8a6c5b8, infoPort=36457, infoSecurePort=0, ipcPort=34731, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:38:16,697 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xad6e8681cc564304 with lease ID 0xa9c33e918502d294: Processing first storage report for DS-7af1bab1-8e4f-4a5f-b5c5-70717299c60a from datanode DatanodeRegistration(127.0.0.1:39871, datanodeUuid=b9ea5724-f543-4ad3-840b-49b6c8a6c5b8, infoPort=36457, infoSecurePort=0, ipcPort=34731, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700) 2024-12-11T22:38:16,697 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xad6e8681cc564304 with lease ID 0xa9c33e918502d294: from storage DS-7af1bab1-8e4f-4a5f-b5c5-70717299c60a node DatanodeRegistration(127.0.0.1:39871, datanodeUuid=b9ea5724-f543-4ad3-840b-49b6c8a6c5b8, infoPort=36457, infoSecurePort=0, ipcPort=34731, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:38:16,937 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data3/current/BP-2052876916-172.17.0.2-1733956694700/current, will proceed with Du for space computation calculation, 2024-12-11T22:38:16,937 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data4/current/BP-2052876916-172.17.0.2-1733956694700/current, will proceed with Du for space computation calculation, 2024-12-11T22:38:16,953 WARN [Thread-539 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T22:38:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa473a6f4514c4971 with lease ID 0xa9c33e918502d295: Processing first storage report for DS-0cd2d44f-f20a-4736-9c57-45f68af2e864 from datanode DatanodeRegistration(127.0.0.1:45739, datanodeUuid=cab13a40-ce0d-452c-b2fb-43ce908deff8, infoPort=42099, infoSecurePort=0, ipcPort=41367, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700) 2024-12-11T22:38:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa473a6f4514c4971 with lease ID 0xa9c33e918502d295: from storage DS-0cd2d44f-f20a-4736-9c57-45f68af2e864 node DatanodeRegistration(127.0.0.1:45739, datanodeUuid=cab13a40-ce0d-452c-b2fb-43ce908deff8, infoPort=42099, infoSecurePort=0, ipcPort=41367, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:38:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa473a6f4514c4971 with lease ID 0xa9c33e918502d295: Processing first storage report for DS-a0238de2-de07-46da-b74d-d4540a6fe123 from datanode DatanodeRegistration(127.0.0.1:45739, datanodeUuid=cab13a40-ce0d-452c-b2fb-43ce908deff8, infoPort=42099, infoSecurePort=0, ipcPort=41367, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700) 2024-12-11T22:38:16,955 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa473a6f4514c4971 with lease ID 0xa9c33e918502d295: from storage DS-a0238de2-de07-46da-b74d-d4540a6fe123 node DatanodeRegistration(127.0.0.1:45739, datanodeUuid=cab13a40-ce0d-452c-b2fb-43ce908deff8, infoPort=42099, infoSecurePort=0, ipcPort=41367, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:38:17,010 WARN [Thread-598 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data5/current/BP-2052876916-172.17.0.2-1733956694700/current, will proceed with Du for space computation calculation, 2024-12-11T22:38:17,011 WARN [Thread-599 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data6/current/BP-2052876916-172.17.0.2-1733956694700/current, will proceed with Du for space computation calculation, 2024-12-11T22:38:17,030 WARN [Thread-561 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T22:38:17,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ebc8620c9df011c with lease ID 0xa9c33e918502d296: Processing first storage report for DS-a96ffe4e-e81b-43f1-b37c-e48c8d2b18dd from datanode DatanodeRegistration(127.0.0.1:34413, datanodeUuid=f54c79c1-0139-46d6-b1f5-cdaeee405238, infoPort=45149, infoSecurePort=0, ipcPort=35791, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700) 2024-12-11T22:38:17,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ebc8620c9df011c with lease ID 0xa9c33e918502d296: from storage DS-a96ffe4e-e81b-43f1-b37c-e48c8d2b18dd node DatanodeRegistration(127.0.0.1:34413, datanodeUuid=f54c79c1-0139-46d6-b1f5-cdaeee405238, infoPort=45149, infoSecurePort=0, ipcPort=35791, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:38:17,036 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ebc8620c9df011c with lease ID 0xa9c33e918502d296: Processing first storage report for DS-02e89282-c81e-423e-a701-c689d741c1fa from datanode DatanodeRegistration(127.0.0.1:34413, datanodeUuid=f54c79c1-0139-46d6-b1f5-cdaeee405238, infoPort=45149, infoSecurePort=0, ipcPort=35791, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700) 2024-12-11T22:38:17,036 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ebc8620c9df011c with lease ID 0xa9c33e918502d296: from storage DS-02e89282-c81e-423e-a701-c689d741c1fa node DatanodeRegistration(127.0.0.1:34413, datanodeUuid=f54c79c1-0139-46d6-b1f5-cdaeee405238, infoPort=45149, infoSecurePort=0, ipcPort=35791, storageInfo=lv=-57;cid=testClusterID;nsid=1854308190;c=1733956694700), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T22:38:17,091 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04 2024-12-11T22:38:17,094 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/zookeeper_0, clientPort=57561, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T22:38:17,095 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57561 2024-12-11T22:38:17,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,097 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741825_1001 (size=7) 2024-12-11T22:38:17,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741825_1001 (size=7) 2024-12-11T22:38:17,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741825_1001 (size=7) 2024-12-11T22:38:17,112 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16 with version=8 2024-12-11T22:38:17,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:43691/user/jenkins/test-data/466b5b34-b9fb-c00f-b994-4aa624d21049/hbase-staging 2024-12-11T22:38:17,114 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:38:17,114 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,114 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,114 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:38:17,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:38:17,115 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T22:38:17,115 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:38:17,115 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45281 2024-12-11T22:38:17,117 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45281 connecting to ZooKeeper ensemble=127.0.0.1:57561 2024-12-11T22:38:17,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452810x0, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:38:17,159 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45281-0x100cb7d5b3a0000 connected 2024-12-11T22:38:17,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:17,229 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16, hbase.cluster.distributed=false 2024-12-11T22:38:17,231 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:38:17,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45281 2024-12-11T22:38:17,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45281 2024-12-11T22:38:17,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45281 2024-12-11T22:38:17,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45281 2024-12-11T22:38:17,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45281 2024-12-11T22:38:17,251 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T22:38:17,252 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:38:17,253 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45265 2024-12-11T22:38:17,254 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45265 connecting to ZooKeeper ensemble=127.0.0.1:57561 2024-12-11T22:38:17,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,256 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,264 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452650x0, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:38:17,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45265-0x100cb7d5b3a0001 connected 2024-12-11T22:38:17,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:17,265 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:38:17,266 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T22:38:17,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:38:17,268 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:38:17,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45265 2024-12-11T22:38:17,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45265 2024-12-11T22:38:17,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45265 2024-12-11T22:38:17,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45265 2024-12-11T22:38:17,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45265 2024-12-11T22:38:17,287 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:38:17,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,287 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:38:17,288 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,288 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:38:17,288 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T22:38:17,288 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:38:17,289 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40551 2024-12-11T22:38:17,290 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40551 connecting to ZooKeeper ensemble=127.0.0.1:57561 2024-12-11T22:38:17,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405510x0, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:38:17,306 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:405510x0, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:17,307 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40551-0x100cb7d5b3a0002 connected 2024-12-11T22:38:17,307 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:38:17,308 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T22:38:17,308 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:38:17,309 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:38:17,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40551 2024-12-11T22:38:17,310 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40551 2024-12-11T22:38:17,311 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40551 2024-12-11T22:38:17,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40551 2024-12-11T22:38:17,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40551 2024-12-11T22:38:17,334 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b762025f20c5:0 server-side Connection retries=45 2024-12-11T22:38:17,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,334 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T22:38:17,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T22:38:17,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T22:38:17,335 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T22:38:17,335 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T22:38:17,336 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39009 2024-12-11T22:38:17,338 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39009 connecting to ZooKeeper ensemble=127.0.0.1:57561 2024-12-11T22:38:17,339 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390090x0, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T22:38:17,358 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390090x0, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:17,358 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39009-0x100cb7d5b3a0003 connected 2024-12-11T22:38:17,358 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T22:38:17,367 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T22:38:17,368 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T22:38:17,370 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T22:38:17,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39009 2024-12-11T22:38:17,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39009 2024-12-11T22:38:17,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39009 2024-12-11T22:38:17,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39009 2024-12-11T22:38:17,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39009 2024-12-11T22:38:17,414 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b762025f20c5:45281 2024-12-11T22:38:17,415 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,422 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,423 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:17,431 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:17,431 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, 
quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,432 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T22:38:17,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,433 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b762025f20c5,45281,1733956697114 from backup master directory 2024-12-11T22:38:17,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,439 WARN [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
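[Annotation] The watcher events in this stretch trace master election in ZooKeeper: the master first registers under /hbase/backup-masters, then creates /hbase/master and deletes its backup-masters entry once it becomes active. For inspecting those znodes against the mini quorum printed earlier (127.0.0.1:57561), a minimal sketch using the stock org.apache.zookeeper client rather than HBase's ZKWatcher; the znode paths are the ones shown in these entries, everything else is illustrative:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeInspect {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode as logged: quorum=127.0.0.1:57561, baseZNode=/hbase.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57561", 30000, event -> { });
    try {
      // /hbase/master holds the serialized ServerName of the active master.
      byte[] master = zk.getData("/hbase/master", false, null);
      System.out.println("active master znode: " + master.length + " bytes");

      // /hbase/backup-masters lists any masters still waiting to become active.
      List<String> backups = zk.getChildren("/hbase/backup-masters", false);
      System.out.println("backup masters: " + backups);
    } finally {
      zk.close();
    }
  }
}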
2024-12-11T22:38:17,439 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,439 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T22:38:17,449 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/hbase.id] with ID: 5d607323-d724-486d-9950-19ce9a742b67 2024-12-11T22:38:17,450 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/.tmp/hbase.id 2024-12-11T22:38:17,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741826_1002 (size=42) 2024-12-11T22:38:17,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741826_1002 (size=42) 2024-12-11T22:38:17,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741826_1002 (size=42) 2024-12-11T22:38:17,462 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/.tmp/hbase.id]:[hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/hbase.id] 2024-12-11T22:38:17,482 INFO [master/b762025f20c5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T22:38:17,483 INFO [master/b762025f20c5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T22:38:17,484 INFO [master/b762025f20c5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
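The FSUtils entries above write the cluster ID to a temporary file and then move it to its final name. A minimal sketch of that write-then-rename pattern with the plain Hadoop FileSystem API (the namenode address and the ID value come from the log; the /demo paths are hypothetical, and this is not FSUtils itself):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:34987"); // namenode from the log

            try (FileSystem fs = FileSystem.get(conf)) {
                Path tmp = new Path("/demo/.tmp/hbase.id"); // hypothetical staging path
                Path dst = new Path("/demo/hbase.id");      // hypothetical final path

                // 1) write the ID to a temporary location ...
                try (FSDataOutputStream out = fs.create(tmp, true)) {
                    out.write("5d607323-d724-486d-9950-19ce9a742b67"
                        .getBytes(StandardCharsets.UTF_8));
                }
                // 2) ... then rename it into place, so readers never see a partial file
                fs.rename(tmp, dst);
            }
        }
    }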
2024-12-11T22:38:17,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,496 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741827_1003 (size=196) 2024-12-11T22:38:17,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741827_1003 (size=196) 2024-12-11T22:38:17,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741827_1003 (size=196) 2024-12-11T22:38:17,505 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:38:17,506 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T22:38:17,506 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T22:38:17,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is 
added to blk_1073741828_1004 (size=1189) 2024-12-11T22:38:17,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741828_1004 (size=1189) 2024-12-11T22:38:17,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741828_1004 (size=1189) 2024-12-11T22:38:17,520 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store 2024-12-11T22:38:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741829_1005 (size=34) 2024-12-11T22:38:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741829_1005 (size=34) 2024-12-11T22:38:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741829_1005 (size=34) 2024-12-11T22:38:17,529 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:17,529 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T22:38:17,529 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:17,529 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
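The master:store descriptor printed above lists per-family attributes such as VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE. As a hedged sketch, an equivalent 'info' family could be expressed with the HBase client's descriptor builders like this (the table name "demo" is hypothetical, and this is not how MasterRegion builds its descriptor internally):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static void main(String[] args) {
            // Mirrors the 'info' family attributes from the log: 3 versions,
            // ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo")) // hypothetical table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                .build();
            System.out.println(td);
        }
    }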
2024-12-11T22:38:17,529 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T22:38:17,529 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:17,530 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:17,530 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733956697529Disabling compacts and flushes for region at 1733956697529Disabling writes for close at 1733956697529Writing region close event to WAL at 1733956697530 (+1 ms)Closed at 1733956697530 2024-12-11T22:38:17,531 WARN [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/.initializing 2024-12-11T22:38:17,531 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/WALs/b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,536 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C45281%2C1733956697114, suffix=, logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/WALs/b762025f20c5,45281,1733956697114, archiveDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/oldWALs, maxLogs=10 2024-12-11T22:38:17,537 INFO [master/b762025f20c5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor b762025f20c5%2C45281%2C1733956697114.1733956697536 2024-12-11T22:38:17,548 INFO [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/WALs/b762025f20c5,45281,1733956697114/b762025f20c5%2C45281%2C1733956697114.1733956697536 2024-12-11T22:38:17,550 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42099:42099),(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:36457:36457)] 2024-12-11T22:38:17,550 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:38:17,551 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:17,551 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,551 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T22:38:17,562 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:17,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:17,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,566 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T22:38:17,566 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:17,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:17,568 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,572 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T22:38:17,572 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:17,573 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:17,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T22:38:17,577 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:17,578 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:17,579 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,580 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,581 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,583 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,584 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,584 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T22:38:17,586 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T22:38:17,590 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:38:17,590 INFO [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61688094, jitterRate=-0.08077576756477356}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:38:17,592 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733956697551Initializing all the Stores at 1733956697552 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956697553 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956697559 (+6 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956697559Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956697559Cleaning up temporary data from old regions at 1733956697584 (+25 ms)Region opened successfully at 1733956697592 (+8 ms) 2024-12-11T22:38:17,593 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T22:38:17,598 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@597c42f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:17,599 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T22:38:17,599 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T22:38:17,599 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T22:38:17,599 INFO [master/b762025f20c5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T22:38:17,600 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-11T22:38:17,601 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-11T22:38:17,601 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T22:38:17,604 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-11T22:38:17,605 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T22:38:17,614 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T22:38:17,615 INFO [master/b762025f20c5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T22:38:17,616 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T22:38:17,622 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T22:38:17,623 INFO [master/b762025f20c5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T22:38:17,624 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T22:38:17,639 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T22:38:17,640 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T22:38:17,655 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T22:38:17,659 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T22:38:17,664 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T22:38:17,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:17,672 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:17,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:17,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-11T22:38:17,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,672 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,673 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b762025f20c5,45281,1733956697114, sessionid=0x100cb7d5b3a0000, setting cluster-up flag (Was=false) 2024-12-11T22:38:17,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,714 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,807 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T22:38:17,808 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,839 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:17,864 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T22:38:17,866 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b762025f20c5,45281,1733956697114 2024-12-11T22:38:17,868 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T22:38:17,876 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:17,877 INFO [master/b762025f20c5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T22:38:17,877 INFO [master/b762025f20c5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
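The StochasticLoadBalancer entry above echoes its tunables (maxSteps, runMaxSteps, stepsPerRegion, maxRunningTime). A sketch of setting configuration values matching those numbers follows; the property key names are assumptions inferred from the parameter names in the log and may not match this HBase version exactly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names; values copied from the balancer's "Loaded config" line.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
            System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
        }
    }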
2024-12-11T22:38:17,877 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b762025f20c5,45281,1733956697114 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T22:38:17,880 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:17,880 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:17,880 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:17,880 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b762025f20c5:0, corePoolSize=5, maxPoolSize=5 2024-12-11T22:38:17,880 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b762025f20c5:0, corePoolSize=10, maxPoolSize=10 2024-12-11T22:38:17,881 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:17,881 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:17,881 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:17,882 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733956727882 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T22:38:17,883 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:17,883 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:17,883 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T22:38:17,884 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T22:38:17,884 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T22:38:17,884 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T22:38:17,884 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T22:38:17,884 INFO [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T22:38:17,885 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733956697885,5,FailOnTimeoutGroup] 2024-12-11T22:38:17,885 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733956697885,5,FailOnTimeoutGroup] 2024-12-11T22:38:17,885 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:17,885 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T22:38:17,885 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:17,885 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
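The ChoreService entries above enable periodic chores such as LogsCleaner (period=600000 ms). A rough sketch of defining and scheduling a custom chore with the same period; the ScheduledChore and ChoreService signatures are written from memory of the HBase API and should be treated as assumptions rather than a copy of the cleaners in the log:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            // Minimal Stoppable so the chore can be told to stop.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };

            // Runs every 600000 ms, like the LogsCleaner chore above.
            ScheduledChore chore = new ScheduledChore("DemoCleaner", stopper, 600000) {
                @Override protected void chore() {
                    System.out.println("periodic cleanup tick");
                }
            };

            new ChoreService("demo").scheduleChore(chore);
        }
    }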
2024-12-11T22:38:17,888 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:17,888 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T22:38:17,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741831_1007 (size=1321) 2024-12-11T22:38:17,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741831_1007 (size=1321) 2024-12-11T22:38:17,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741831_1007 (size=1321) 2024-12-11T22:38:17,902 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(746): ClusterId : 5d607323-d724-486d-9950-19ce9a742b67 2024-12-11T22:38:17,902 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:38:17,912 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T22:38:17,913 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16 2024-12-11T22:38:17,914 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:38:17,915 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:38:17,915 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(746): ClusterId : 5d607323-d724-486d-9950-19ce9a742b67 2024-12-11T22:38:17,915 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:38:17,927 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(746): ClusterId : 5d607323-d724-486d-9950-19ce9a742b67 2024-12-11T22:38:17,927 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T22:38:17,941 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:38:17,943 DEBUG [RS:0;b762025f20c5:45265 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34eab060, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:17,946 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:38:17,946 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:38:17,960 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:38:17,960 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T22:38:17,960 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T22:38:17,961 DEBUG [RS:1;b762025f20c5:40551 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6073d543, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:17,963 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b762025f20c5:45265 2024-12-11T22:38:17,963 INFO [RS:0;b762025f20c5:45265 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:38:17,964 INFO [RS:0;b762025f20c5:45265 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:38:17,964 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T22:38:17,973 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,45281,1733956697114 with port=45265, startcode=1733956697251 2024-12-11T22:38:17,973 DEBUG [RS:0;b762025f20c5:45265 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:38:17,982 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b762025f20c5:40551 2024-12-11T22:38:17,982 INFO [RS:1;b762025f20c5:40551 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:38:17,982 INFO [RS:1;b762025f20c5:40551 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:38:17,983 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T22:38:17,984 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T22:38:17,985 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,45281,1733956697114 with port=40551, startcode=1733956697287 2024-12-11T22:38:17,985 DEBUG [RS:2;b762025f20c5:39009 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f89760, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b762025f20c5/172.17.0.2:0 2024-12-11T22:38:17,985 DEBUG [RS:1;b762025f20c5:40551 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:38:17,990 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42395, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:38:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741832_1008 (size=32) 2024-12-11T22:38:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741832_1008 (size=32) 2024-12-11T22:38:17,995 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,45265,1733956697251 2024-12-11T22:38:17,995 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,45265,1733956697251 2024-12-11T22:38:17,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741832_1008 (size=32) 2024-12-11T22:38:18,001 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:18,003 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:38:18,004 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,40551,1733956697287 2024-12-11T22:38:18,004 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,40551,1733956697287 2024-12-11T22:38:18,010 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16 2024-12-11T22:38:18,010 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34987 2024-12-11T22:38:18,010 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:38:18,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T22:38:18,016 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16 2024-12-11T22:38:18,017 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34987 2024-12-11T22:38:18,017 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:38:18,018 DEBUG [RS:2;b762025f20c5:39009 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b762025f20c5:39009 2024-12-11T22:38:18,018 INFO [RS:2;b762025f20c5:39009 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T22:38:18,018 INFO [RS:2;b762025f20c5:39009 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T22:38:18,018 DEBUG [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(832): About to register with Master. 
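The reportForDuty and "Registering regionserver" entries above show the three region servers checking in with the master. A client-side way to observe the same membership once the cluster is up, using the standard Admin API (the quorum settings mirror the log; the snippet is illustrative and not part of the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListRegionServers {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum host from the log
            conf.set("hbase.zookeeper.property.clientPort", "57561");  // quorum port from the log

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                // Each key is a ServerName like b762025f20c5,45265,1733956697251.
                metrics.getLiveServerMetrics().keySet()
                       .forEach(sn -> System.out.println("live regionserver: " + sn));
            }
        }
    }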
2024-12-11T22:38:18,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T22:38:18,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:18,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T22:38:18,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T22:38:18,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T22:38:18,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T22:38:18,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T22:38:18,055 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T22:38:18,056 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T22:38:18,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740 2024-12-11T22:38:18,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740 2024-12-11T22:38:18,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T22:38:18,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T22:38:18,063 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta 
descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T22:38:18,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T22:38:18,072 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:38:18,073 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60149769, jitterRate=-0.10369859635829926}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:38:18,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733956698001Initializing all the Stores at 1733956698006 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956698007 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956698011 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956698011Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956698011Cleaning up temporary data from old regions at 1733956698062 (+51 ms)Region opened successfully at 1733956698074 (+12 ms) 2024-12-11T22:38:18,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T22:38:18,075 DEBUG [RS:0;b762025f20c5:45265 {}] zookeeper.ZKUtil(111): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,075 WARN [RS:0;b762025f20c5:45265 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
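
The region open journal above lists the four hbase:meta column families (info, ns, rep_barrier, table) with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, IN_MEMORY=true and an 8 KB block size for most families. A user-table family with the same attributes can be described with the standard descriptor builder; this is only a sketch of equivalent settings, not how meta itself is created:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamily {
      // Mirrors the 'info' family attributes printed in the open journal above.
      public static ColumnFamilyDescriptor infoLike() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
      }
    }
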
2024-12-11T22:38:18,075 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,40551,1733956697287] 2024-12-11T22:38:18,075 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,45265,1733956697251] 2024-12-11T22:38:18,075 INFO [RS:0;b762025f20c5:45265 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T22:38:18,075 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,076 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T22:38:18,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T22:38:18,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T22:38:18,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T22:38:18,077 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T22:38:18,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733956698075Disabling compacts and flushes for region at 1733956698075Disabling writes for close at 1733956698076 (+1 ms)Writing region close event to WAL at 1733956698077 (+1 ms)Closed at 1733956698077 2024-12-11T22:38:18,080 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(2659): reportForDuty to master=b762025f20c5,45281,1733956697114 with port=39009, startcode=1733956697333 2024-12-11T22:38:18,080 DEBUG [RS:2;b762025f20c5:39009 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T22:38:18,083 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55465, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T22:38:18,084 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b762025f20c5,39009,1733956697333 2024-12-11T22:38:18,084 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45281 {}] master.ServerManager(517): Registering regionserver=b762025f20c5,39009,1733956697333 2024-12-11T22:38:18,086 DEBUG [RS:1;b762025f20c5:40551 {}] zookeeper.ZKUtil(111): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,40551,1733956697287 2024-12-11T22:38:18,086 WARN [RS:1;b762025f20c5:40551 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
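
Everything from reportForDuty through the RegionServerTracker additions above is the normal three-server topology that a mini-cluster test stands up. A bare-bones equivalent is sketched below; the utility class is HBaseTestingUtility on the 2.x line and HBaseTestingUtil on newer branches, and the 2.x spelling is assumed here:

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class ThreeServerCluster {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Starts one master and three regionservers on a mini HDFS/ZooKeeper,
        // which is what produces the reportForDuty/registration lines above.
        util.startMiniCluster(3);
        try {
          // Test code would run against util.getConnection() here.
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
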
2024-12-11T22:38:18,086 INFO [RS:1;b762025f20c5:40551 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T22:38:18,086 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,40551,1733956697287 2024-12-11T22:38:18,089 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:18,089 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T22:38:18,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T22:38:18,091 INFO [RS:0;b762025f20c5:45265 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:38:18,091 DEBUG [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16 2024-12-11T22:38:18,091 DEBUG [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34987 2024-12-11T22:38:18,091 DEBUG [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T22:38:18,098 DEBUG [RS:2;b762025f20c5:39009 {}] zookeeper.ZKUtil(111): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b762025f20c5,39009,1733956697333 2024-12-11T22:38:18,098 WARN [RS:2;b762025f20c5:39009 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T22:38:18,098 INFO [RS:2;b762025f20c5:39009 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T22:38:18,098 DEBUG [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,39009,1733956697333 2024-12-11T22:38:18,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:18,100 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T22:38:18,101 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T22:38:18,103 INFO [RS:0;b762025f20c5:45265 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:38:18,104 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b762025f20c5,39009,1733956697333] 2024-12-11T22:38:18,105 INFO [RS:0;b762025f20c5:45265 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:38:18,105 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,107 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:38:18,108 INFO [RS:1;b762025f20c5:40551 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:38:18,112 INFO [RS:0;b762025f20c5:45265 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:38:18,112 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
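
The ZKWatcher lines above are the master reacting to NodeChildrenChanged events on /hbase/rs as each RegionServer creates its ephemeral znode. The same watch-and-list pattern with the plain ZooKeeper client, assuming a quorum address of your own (this test happens to use 127.0.0.1:57561), is roughly:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsChildrenWatcher {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        Watcher watcher = new Watcher() {
          @Override public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeChildrenChanged
                && "/hbase/rs".equals(event.getPath())) {
              System.out.println("RegionServer set changed under /hbase/rs");
            }
          }
        };
        // Registers the watcher and returns the current ephemeral RS znodes.
        List<String> servers = zk.getChildren("/hbase/rs", watcher);
        servers.forEach(s -> System.out.println("registered: " + s));
      }
    }
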
2024-12-11T22:38:18,112 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,112 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,113 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,114 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,114 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:18,114 DEBUG [RS:0;b762025f20c5:45265 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:18,120 INFO [RS:1;b762025f20c5:40551 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:38:18,122 INFO [RS:1;b762025f20c5:40551 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:38:18,122 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
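
Each "Starting executor service name=RS_*, corePoolSize=N, maxPoolSize=N" line above is a small dedicated pool keyed by event type (open region, close region, log replay, snapshots, flushes). HBase's executor.ExecutorService is an internal class, but in plain java.util.concurrent terms a corePoolSize=1/maxPoolSize=1 pool amounts to:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class SingleHandlerPool {
      public static void main(String[] args) {
        // One core thread, one max thread, unbounded queue: events of a given
        // type are handled strictly one at a time, like RS_OPEN_META above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.submit(() -> System.out.println("handling one event"));
        pool.shutdown();
      }
    }
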
2024-12-11T22:38:18,123 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:38:18,124 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,124 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,124 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,124 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,124 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,124 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45265,1733956697251-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:18,125 INFO [RS:1;b762025f20c5:40551 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:38:18,125 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,125 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,125 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG 
[RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:18,126 DEBUG [RS:1;b762025f20c5:40551 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:18,128 INFO [RS:2;b762025f20c5:39009 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T22:38:18,131 INFO [RS:2;b762025f20c5:39009 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T22:38:18,139 INFO [RS:2;b762025f20c5:39009 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T22:38:18,139 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,140 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T22:38:18,140 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,140 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,140 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,140 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,140 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,140 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,40551,1733956697287-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:18,146 INFO [RS:2;b762025f20c5:39009 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T22:38:18,146 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T22:38:18,146 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b762025f20c5:0, corePoolSize=2, maxPoolSize=2 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,147 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,148 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,148 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b762025f20c5:0, corePoolSize=1, maxPoolSize=1 2024-12-11T22:38:18,148 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:18,148 DEBUG [RS:2;b762025f20c5:39009 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0, corePoolSize=3, maxPoolSize=3 2024-12-11T22:38:18,154 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:38:18,154 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45265,1733956697251-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,154 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
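
The long run of "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines is each server registering its periodic background tasks (compaction checks, memstore flush checks, cleaners) with a ChoreService. A custom chore can be built on the same classes; this sketch assumes the hbase-common ChoreService/ScheduledChore API and borrows the 1000 ms period of the CompactionChecker entries above:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class DemoChore {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        ScheduledChore checker = new ScheduledChore("DemoChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("periodic check");  // invoked every 1000 ms
          }
        };
        service.scheduleChore(checker);
        Thread.sleep(5_000);
        service.shutdown();
      }
    }
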
2024-12-11T22:38:18,154 INFO [RS:0;b762025f20c5:45265 {}] regionserver.Replication(171): b762025f20c5,45265,1733956697251 started 2024-12-11T22:38:18,155 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,155 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,156 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,156 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,156 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,156 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,39009,1733956697333-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:18,161 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:38:18,161 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,40551,1733956697287-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,162 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,162 INFO [RS:1;b762025f20c5:40551 {}] regionserver.Replication(171): b762025f20c5,40551,1733956697287 started 2024-12-11T22:38:18,170 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T22:38:18,171 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,45265,1733956697251, RpcServer on b762025f20c5/172.17.0.2:45265, sessionid=0x100cb7d5b3a0001 2024-12-11T22:38:18,171 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:38:18,171 DEBUG [RS:0;b762025f20c5:45265 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,171 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,45265,1733956697251' 2024-12-11T22:38:18,171 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:38:18,172 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:38:18,173 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:38:18,173 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:38:18,173 DEBUG [RS:0;b762025f20c5:45265 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,173 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,45265,1733956697251' 2024-12-11T22:38:18,173 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:38:18,175 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:38:18,179 DEBUG [RS:0;b762025f20c5:45265 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:38:18,180 INFO [RS:0;b762025f20c5:45265 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:38:18,180 INFO [RS:0;b762025f20c5:45265 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:38:18,182 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T22:38:18,182 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,39009,1733956697333-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,182 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,182 INFO [RS:2;b762025f20c5:39009 {}] regionserver.Replication(171): b762025f20c5,39009,1733956697333 started 2024-12-11T22:38:18,183 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T22:38:18,183 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,40551,1733956697287, RpcServer on b762025f20c5/172.17.0.2:40551, sessionid=0x100cb7d5b3a0002 2024-12-11T22:38:18,183 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:38:18,183 DEBUG [RS:1;b762025f20c5:40551 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,40551,1733956697287 2024-12-11T22:38:18,183 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,40551,1733956697287' 2024-12-11T22:38:18,183 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:38:18,184 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:38:18,184 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:38:18,185 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:38:18,185 DEBUG [RS:1;b762025f20c5:40551 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,40551,1733956697287 2024-12-11T22:38:18,185 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,40551,1733956697287' 2024-12-11T22:38:18,185 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:38:18,185 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:38:18,186 DEBUG [RS:1;b762025f20c5:40551 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:38:18,186 INFO [RS:1;b762025f20c5:40551 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:38:18,186 INFO [RS:1;b762025f20c5:40551 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:38:18,201 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T22:38:18,201 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(1482): Serving as b762025f20c5,39009,1733956697333, RpcServer on b762025f20c5/172.17.0.2:39009, sessionid=0x100cb7d5b3a0003 2024-12-11T22:38:18,202 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T22:38:18,202 DEBUG [RS:2;b762025f20c5:39009 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b762025f20c5,39009,1733956697333 2024-12-11T22:38:18,202 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,39009,1733956697333' 2024-12-11T22:38:18,202 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T22:38:18,203 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T22:38:18,203 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T22:38:18,203 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T22:38:18,203 DEBUG [RS:2;b762025f20c5:39009 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b762025f20c5,39009,1733956697333 2024-12-11T22:38:18,203 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b762025f20c5,39009,1733956697333' 2024-12-11T22:38:18,203 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T22:38:18,204 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T22:38:18,205 DEBUG [RS:2;b762025f20c5:39009 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T22:38:18,205 INFO [RS:2;b762025f20c5:39009 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T22:38:18,205 INFO [RS:2;b762025f20c5:39009 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T22:38:18,252 WARN [b762025f20c5:45281 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
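
At this point all three RegionServers report "Serving as ...", and the earlier "No servers available; cannot place 1 unassigned regions" warning resolves itself once the assign queue is reprocessed below. From a client, the same live-server set can be inspected through the Admin API; a sketch assuming a reachable cluster configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            System.out.println("live regionserver: " + sn);
          }
        }
      }
    }
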
2024-12-11T22:38:18,282 INFO [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C45265%2C1733956697251, suffix=, logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,45265,1733956697251, archiveDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs, maxLogs=32 2024-12-11T22:38:18,283 INFO [RS:0;b762025f20c5:45265 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b762025f20c5%2C45265%2C1733956697251.1733956698283 2024-12-11T22:38:18,288 INFO [RS:1;b762025f20c5:40551 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C40551%2C1733956697287, suffix=, logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,40551,1733956697287, archiveDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs, maxLogs=32 2024-12-11T22:38:18,289 INFO [RS:1;b762025f20c5:40551 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b762025f20c5%2C40551%2C1733956697287.1733956698289 2024-12-11T22:38:18,292 INFO [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,45265,1733956697251/b762025f20c5%2C45265%2C1733956697251.1733956698283 2024-12-11T22:38:18,296 DEBUG [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:36457:36457),(127.0.0.1/127.0.0.1:42099:42099)] 2024-12-11T22:38:18,305 INFO [RS:1;b762025f20c5:40551 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,40551,1733956697287/b762025f20c5%2C40551%2C1733956697287.1733956698289 2024-12-11T22:38:18,307 INFO [RS:2;b762025f20c5:39009 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C39009%2C1733956697333, suffix=, logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,39009,1733956697333, archiveDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs, maxLogs=32 2024-12-11T22:38:18,307 DEBUG [RS:1;b762025f20c5:40551 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42099:42099),(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:36457:36457)] 2024-12-11T22:38:18,308 INFO [RS:2;b762025f20c5:39009 {}] monitor.StreamSlowMonitor(122): New stream slow monitor b762025f20c5%2C39009%2C1733956697333.1733956698308 2024-12-11T22:38:18,316 INFO [RS:2;b762025f20c5:39009 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,39009,1733956697333/b762025f20c5%2C39009%2C1733956697333.1733956698308 2024-12-11T22:38:18,317 DEBUG [RS:2;b762025f20c5:39009 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:42099:42099),(127.0.0.1/127.0.0.1:36457:36457)] 2024-12-11T22:38:18,502 DEBUG [b762025f20c5:45281 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T22:38:18,503 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:38:18,505 DEBUG [b762025f20c5:45281 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:38:18,506 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:38:18,506 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:38:18,506 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:38:18,506 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:38:18,506 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:38:18,506 INFO [b762025f20c5:45281 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:38:18,506 INFO [b762025f20c5:45281 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:38:18,506 INFO [b762025f20c5:45281 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:38:18,506 DEBUG [b762025f20c5:45281 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:38:18,506 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,508 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b762025f20c5,45265,1733956697251, state=OPENING 2024-12-11T22:38:18,522 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T22:38:18,531 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:18,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,532 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-12-11T22:38:18,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b762025f20c5,45265,1733956697251}] 2024-12-11T22:38:18,687 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:38:18,689 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51535, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:38:18,695 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T22:38:18,696 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T22:38:18,699 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b762025f20c5%2C45265%2C1733956697251.meta, suffix=.meta, logDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,45265,1733956697251, archiveDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs, maxLogs=32 2024-12-11T22:38:18,701 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor b762025f20c5%2C45265%2C1733956697251.meta.1733956698700.meta 2024-12-11T22:38:18,720 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/WALs/b762025f20c5,45265,1733956697251/b762025f20c5%2C45265%2C1733956697251.meta.1733956698700.meta 2024-12-11T22:38:18,724 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45149:45149),(127.0.0.1/127.0.0.1:42099:42099),(127.0.0.1/127.0.0.1:36457:36457)] 2024-12-11T22:38:18,724 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:38:18,725 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T22:38:18,725 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T22:38:18,725 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
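
The CoprocessorHost lines above show hbase:meta loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from its table descriptor. Attaching the same endpoint to a user table goes through the descriptor builder; a sketch with a hypothetical table name "demo":

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class MultiRowMutationTable {
      public static TableDescriptor descriptor() throws Exception {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same endpoint class the meta descriptor carries in the log above.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }
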
2024-12-11T22:38:18,725 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T22:38:18,725 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:18,726 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T22:38:18,726 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T22:38:18,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T22:38:18,733 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T22:38:18,733 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T22:38:18,735 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T22:38:18,735 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T22:38:18,737 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T22:38:18,737 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T22:38:18,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T22:38:18,739 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T22:38:18,739 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
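
The CompactionConfiguration dumps repeated above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) reflect the standard compaction settings. The keys below are the usual names for those values, written from memory rather than taken from this log, so verify them against your HBase version before relying on them:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSettings {
      public static Configuration defaultsAsSeenInLog() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                   // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                  // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);            // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);    // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        return conf;
      }
    }
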
2024-12-11T22:38:18,740 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T22:38:18,741 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740 2024-12-11T22:38:18,743 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740 2024-12-11T22:38:18,744 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T22:38:18,744 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T22:38:18,745 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T22:38:18,747 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T22:38:18,748 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63714840, jitterRate=-0.0505748987197876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T22:38:18,749 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T22:38:18,750 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733956698726Writing region info on filesystem at 1733956698726Initializing all the Stores at 1733956698731 (+5 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956698731Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956698731Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956698731Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733956698731Cleaning up temporary data from old regions at 1733956698744 (+13 ms)Running coprocessor post-open hooks at 1733956698749 (+5 ms)Region opened successfully at 1733956698750 (+1 ms) 2024-12-11T22:38:18,752 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733956698687 2024-12-11T22:38:18,760 DEBUG [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T22:38:18,760 INFO [RS_OPEN_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T22:38:18,762 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,765 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b762025f20c5,45265,1733956697251, state=OPEN 2024-12-11T22:38:18,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:18,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:18,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:18,773 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T22:38:18,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,773 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b762025f20c5,45265,1733956697251 2024-12-11T22:38:18,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,773 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T22:38:18,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T22:38:18,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b762025f20c5,45265,1733956697251 in 241 msec 2024-12-11T22:38:18,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T22:38:18,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 689 msec 2024-12-11T22:38:18,790 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T22:38:18,790 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T22:38:18,796 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:38:18,796 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,45265,1733956697251, seqNum=-1] 2024-12-11T22:38:18,796 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:38:18,798 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35545, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:38:18,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 939 msec 2024-12-11T22:38:18,811 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733956698811, completionTime=-1 2024-12-11T22:38:18,811 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T22:38:18,811 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-11T22:38:18,814 INFO [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T22:38:18,814 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733956758814 2024-12-11T22:38:18,814 INFO [master/b762025f20c5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733956818814 2024-12-11T22:38:18,815 INFO [master/b762025f20c5:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-11T22:38:18,815 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T22:38:18,818 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956697114-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,819 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956697114-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,819 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956697114-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,819 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b762025f20c5:45281, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,819 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,819 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,821 DEBUG [master/b762025f20c5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.386sec 2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T22:38:18,825 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956697114-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T22:38:18,826 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956697114-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T22:38:18,829 DEBUG [master/b762025f20c5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T22:38:18,829 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T22:38:18,829 INFO [master/b762025f20c5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b762025f20c5,45281,1733956697114-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T22:38:18,909 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ca15d1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:38:18,909 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b762025f20c5,45281,-1 for getting cluster id 2024-12-11T22:38:18,910 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T22:38:18,916 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d607323-d724-486d-9950-19ce9a742b67' 2024-12-11T22:38:18,923 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T22:38:18,924 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d607323-d724-486d-9950-19ce9a742b67" 2024-12-11T22:38:18,924 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77bd65ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:38:18,924 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b762025f20c5,45281,-1] 2024-12-11T22:38:18,925 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T22:38:18,927 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:18,930 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37620, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T22:38:18,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64710661, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T22:38:18,938 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T22:38:18,944 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b762025f20c5,45265,1733956697251, seqNum=-1] 2024-12-11T22:38:18,944 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:38:18,951 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41648, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:38:18,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b762025f20c5,45281,1733956697114 2024-12-11T22:38:18,955 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T22:38:18,957 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is b762025f20c5,45281,1733956697114 2024-12-11T22:38:18,958 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7050d6f2 2024-12-11T22:38:18,958 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T22:38:18,960 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T22:38:18,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T22:38:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T22:38:18,973 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T22:38:18,973 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:18,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T22:38:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:18,976 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T22:38:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741837_1013 (size=392) 2024-12-11T22:38:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741837_1013 (size=392) 2024-12-11T22:38:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741837_1013 (size=392) 2024-12-11T22:38:19,028 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aa641e31c2548f731509a955b2535d63, NAME => 'TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16 2024-12-11T22:38:19,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741838_1014 (size=51) 2024-12-11T22:38:19,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741838_1014 (size=51) 2024-12-11T22:38:19,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741838_1014 (size=51) 2024-12-11T22:38:19,056 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:19,056 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing aa641e31c2548f731509a955b2535d63, disabling compactions & flushes 2024-12-11T22:38:19,056 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:19,056 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:19,056 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. after waiting 0 ms 2024-12-11T22:38:19,057 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:19,057 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:19,057 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa641e31c2548f731509a955b2535d63: Waiting for close lock at 1733956699056Disabling compacts and flushes for region at 1733956699056Disabling writes for close at 1733956699056Writing region close event to WAL at 1733956699057 (+1 ms)Closed at 1733956699057 2024-12-11T22:38:19,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T22:38:19,060 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733956699059"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733956699059"}]},"ts":"1733956699059"} 2024-12-11T22:38:19,064 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T22:38:19,068 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T22:38:19,069 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733956699068"}]},"ts":"1733956699068"} 2024-12-11T22:38:19,076 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T22:38:19,077 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b762025f20c5=0} racks are {/default-rack=0} 2024-12-11T22:38:19,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T22:38:19,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T22:38:19,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T22:38:19,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T22:38:19,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T22:38:19,078 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T22:38:19,079 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T22:38:19,079 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T22:38:19,079 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T22:38:19,079 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T22:38:19,079 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=aa641e31c2548f731509a955b2535d63, ASSIGN}] 2024-12-11T22:38:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:19,083 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=TestHBaseWalOnEC, region=aa641e31c2548f731509a955b2535d63, ASSIGN 2024-12-11T22:38:19,090 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=aa641e31c2548f731509a955b2535d63, ASSIGN; state=OFFLINE, location=b762025f20c5,40551,1733956697287; forceNewPlan=false, retain=false 2024-12-11T22:38:19,241 INFO [b762025f20c5:45281 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T22:38:19,242 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa641e31c2548f731509a955b2535d63, regionState=OPENING, regionLocation=b762025f20c5,40551,1733956697287 2024-12-11T22:38:19,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=aa641e31c2548f731509a955b2535d63, ASSIGN because future has completed 2024-12-11T22:38:19,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa641e31c2548f731509a955b2535d63, server=b762025f20c5,40551,1733956697287}] 2024-12-11T22:38:19,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:19,408 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T22:38:19,416 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T22:38:19,459 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:19,460 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => aa641e31c2548f731509a955b2535d63, NAME => 'TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63.', STARTKEY => '', ENDKEY => ''} 2024-12-11T22:38:19,460 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,460 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T22:38:19,461 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,461 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,472 INFO [StoreOpener-aa641e31c2548f731509a955b2535d63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,480 INFO [StoreOpener-aa641e31c2548f731509a955b2535d63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa641e31c2548f731509a955b2535d63 columnFamilyName cf 2024-12-11T22:38:19,480 DEBUG [StoreOpener-aa641e31c2548f731509a955b2535d63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T22:38:19,486 INFO [StoreOpener-aa641e31c2548f731509a955b2535d63-1 {}] regionserver.HStore(327): Store=aa641e31c2548f731509a955b2535d63/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T22:38:19,486 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,491 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,492 DEBUG 
[RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,493 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,493 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,501 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,521 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T22:38:19,522 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened aa641e31c2548f731509a955b2535d63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69079774, jitterRate=0.029368847608566284}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T22:38:19,522 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:19,524 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for aa641e31c2548f731509a955b2535d63: Running coprocessor pre-open hook at 1733956699461Writing region info on filesystem at 1733956699461Initializing all the Stores at 1733956699470 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733956699470Cleaning up temporary data from old regions at 1733956699493 (+23 ms)Running coprocessor post-open hooks at 1733956699522 (+29 ms)Region opened successfully at 1733956699524 (+2 ms) 2024-12-11T22:38:19,528 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63., pid=6, masterSystemTime=1733956699407 2024-12-11T22:38:19,534 DEBUG [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:19,535 INFO [RS_OPEN_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:19,536 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=aa641e31c2548f731509a955b2535d63, regionState=OPEN, openSeqNum=2, regionLocation=b762025f20c5,40551,1733956697287 2024-12-11T22:38:19,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa641e31c2548f731509a955b2535d63, server=b762025f20c5,40551,1733956697287 because future has completed 2024-12-11T22:38:19,559 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T22:38:19,559 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure aa641e31c2548f731509a955b2535d63, server=b762025f20c5,40551,1733956697287 in 299 msec 2024-12-11T22:38:19,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T22:38:19,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=aa641e31c2548f731509a955b2535d63, ASSIGN in 480 msec 2024-12-11T22:38:19,566 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T22:38:19,567 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733956699566"}]},"ts":"1733956699566"} 2024-12-11T22:38:19,571 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T22:38:19,574 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T22:38:19,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 612 msec 2024-12-11T22:38:19,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T22:38:19,603 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T22:38:19,603 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T22:38:19,603 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:38:19,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T22:38:19,608 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T22:38:19,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
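The pid=4 CreateTableProcedure above is driven by an ordinary client createTable() request (the RawAsyncHBaseAdmin "Operation: CREATE ... completed" line is its completion). A minimal client-side sketch, assuming an open Connection to this minicluster; the table and family names come from the log, the synchronous Admin facade and the class name are illustrative choices:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  // Issues the same "create 'TestHBaseWalOnEC', {NAME => 'cf', ...}" request the
  // master logs above, returning once the procedure framework reports success.
  public static void createTestTable(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // single 'cf' family, defaults otherwise
          .build());
    }
    // The HBaseTestingUtil(3046)/(3120) lines above correspond to the test then
    // waiting until all regions of the new table are assigned before writing to it.
  }
}
```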
2024-12-11T22:38:19,613 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63., hostname=b762025f20c5,40551,1733956697287, seqNum=2] 2024-12-11T22:38:19,613 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T22:38:19,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47590, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T22:38:19,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T22:38:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T22:38:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:19,623 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T22:38:19,625 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T22:38:19,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T22:38:19,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:19,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40551 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T22:38:19,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:19,784 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing aa641e31c2548f731509a955b2535d63 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T22:38:19,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/.tmp/cf/46af38399ea8464bae2f4c371648f5a6 is 36, key is row/cf:cq/1733956699616/Put/seqid=0 2024-12-11T22:38:19,807 WARN [IPC Server handler 2 on default port 34987 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T22:38:19,807 WARN [IPC Server handler 2 on default port 34987 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T22:38:19,808 WARN [IPC Server handler 2 on default port 34987 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T22:38:19,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741839_1015 (size=4787) 2024-12-11T22:38:19,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741839_1015 (size=4787) 2024-12-11T22:38:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:20,224 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/.tmp/cf/46af38399ea8464bae2f4c371648f5a6 2024-12-11T22:38:20,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/.tmp/cf/46af38399ea8464bae2f4c371648f5a6 as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/cf/46af38399ea8464bae2f4c371648f5a6 
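The two BlockPlacementPolicyDefault warnings above mean the NameNode momentarily could not find a third DISK target for the new block (replication=3 against the three local DataNodes); the subsequent addStoredBlock lines and the completed flush show the write still succeeds. Purely as an illustration, and not something this test does, a local minicluster that does not need three copies could lower the default replication before starting HDFS; dfs.replication is the standard HDFS key, the surrounding class is assumed:

```java
import org.apache.hadoop.conf.Configuration;

// Illustration only: with fewer required replicas than DataNodes, the placement
// policy never has to hunt for a third DISK target. The run in this log keeps
// the default of 3; the value below is arbitrary.
public class ReplicationSketch {
  public static Configuration lowReplicationConf() {
    Configuration conf = new Configuration();
    conf.setInt("dfs.replication", 2);
    return conf;
  }
}
```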
2024-12-11T22:38:20,244 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/cf/46af38399ea8464bae2f4c371648f5a6, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T22:38:20,246 INFO [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for aa641e31c2548f731509a955b2535d63 in 462ms, sequenceid=5, compaction requested=false 2024-12-11T22:38:20,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for aa641e31c2548f731509a955b2535d63: 2024-12-11T22:38:20,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:20,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/b762025f20c5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T22:38:20,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T22:38:20,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:20,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T22:38:20,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 624 msec 2024-12-11T22:38:20,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 638 msec 2024-12-11T22:38:20,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45281 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T22:38:20,763 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T22:38:20,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T22:38:20,767 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
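The flush sequence above (one ~32 B cell at row/cf:cq, the FlushTableProcedure pid=7 with its FlushRegionProcedure subprocedure pid=8, and the committed store file 46af38399ea8464bae2f4c371648f5a6) corresponds to a single Put followed by an admin-triggered flush. A minimal client-side sketch, assuming an open Connection; the cell value is not visible in the log and is made up here:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void putAndFlush(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
      // Writes the row/cf:cq cell the HFileWriterImpl(814) line above reports.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the flush that produced the 4.7 K store file in the log.
      admin.flush(tn);
    }
  }
}
```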
2024-12-11T22:38:20,767 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:20,767 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
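The call stack above places the shutdown in TestHBaseWalOnEC.tearDown(), which hands off to HBaseTestingUtil.shutdownMiniCluster(). A minimal sketch of that teardown, assuming a shared HBaseTestingUtil field; the field name UTIL and the surrounding class are assumptions, only the shutdownMiniCluster() call is taken from the stack trace:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class TearDownSketch {
  // Assumed shared testing-util instance; the stack trace shows only the class,
  // not how the test holds on to it.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  public static void tearDown() throws Exception {
    // Closes the shared async connection, asks the master to shut the cluster down,
    // then stops the three region servers, which is the sequence the log records next.
    UTIL.shutdownMiniCluster();
  }
}
```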
2024-12-11T22:38:20,767 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:20,767 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T22:38:20,767 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T22:38:20,767 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=834583412, stopped=false 2024-12-11T22:38:20,767 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b762025f20c5,45281,1733956697114 2024-12-11T22:38:20,897 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:20,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:20,898 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:20,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:20,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:20,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:20,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T22:38:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:20,900 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:20,900 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T22:38:20,900 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-12-11T22:38:20,900 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T22:38:20,901 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T22:38:20,901 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:20,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:20,901 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,45265,1733956697251' ***** 2024-12-11T22:38:20,902 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T22:38:20,902 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,40551,1733956697287' ***** 2024-12-11T22:38:20,902 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T22:38:20,902 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b762025f20c5,39009,1733956697333' ***** 2024-12-11T22:38:20,902 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T22:38:20,902 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T22:38:20,902 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T22:38:20,903 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T22:38:20,903 INFO [RS:2;b762025f20c5:39009 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T22:38:20,903 INFO [RS:0;b762025f20c5:45265 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T22:38:20,903 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T22:38:20,903 INFO [RS:2;b762025f20c5:39009 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T22:38:20,903 INFO [RS:0;b762025f20c5:45265 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T22:38:20,903 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,39009,1733956697333 2024-12-11T22:38:20,903 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,45265,1733956697251 2024-12-11T22:38:20,903 INFO [RS:2;b762025f20c5:39009 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:20,903 INFO [RS:0;b762025f20c5:45265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:20,903 INFO [RS:2;b762025f20c5:39009 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b762025f20c5:39009. 2024-12-11T22:38:20,903 INFO [RS:0;b762025f20c5:45265 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b762025f20c5:45265. 
2024-12-11T22:38:20,903 DEBUG [RS:2;b762025f20c5:39009 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:20,904 DEBUG [RS:2;b762025f20c5:39009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:20,904 DEBUG [RS:0;b762025f20c5:45265 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:20,904 DEBUG [RS:0;b762025f20c5:45265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:20,904 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,39009,1733956697333; all regions closed. 2024-12-11T22:38:20,904 INFO [RS:0;b762025f20c5:45265 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T22:38:20,904 INFO [RS:0;b762025f20c5:45265 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-11T22:38:20,904 INFO [RS:0;b762025f20c5:45265 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T22:38:20,904 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T22:38:20,904 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T22:38:20,904 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T22:38:20,905 INFO [RS:1;b762025f20c5:40551 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T22:38:20,905 INFO [RS:1;b762025f20c5:40551 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T22:38:20,905 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(3091): Received CLOSE for aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:20,905 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T22:38:20,905 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T22:38:20,905 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T22:38:20,906 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T22:38:20,906 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T22:38:20,906 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T22:38:20,906 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T22:38:20,906 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T22:38:20,906 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T22:38:20,907 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(959): stopping server b762025f20c5,40551,1733956697287 2024-12-11T22:38:20,907 INFO [RS:1;b762025f20c5:40551 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:20,907 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing aa641e31c2548f731509a955b2535d63, disabling compactions & flushes 2024-12-11T22:38:20,907 INFO [RS:1;b762025f20c5:40551 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b762025f20c5:40551. 2024-12-11T22:38:20,907 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:20,907 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:20,907 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:20,907 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. after waiting 0 ms 2024-12-11T22:38:20,907 DEBUG [RS:1;b762025f20c5:40551 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T22:38:20,907 DEBUG [RS:1;b762025f20c5:40551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:20,907 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:20,907 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:20,907 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:20,907 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T22:38:20,908 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1325): Online Regions={aa641e31c2548f731509a955b2535d63=TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63.} 2024-12-11T22:38:20,908 DEBUG [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1351): Waiting on aa641e31c2548f731509a955b2535d63 2024-12-11T22:38:20,911 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:20,911 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741835_1011 (size=93) 2024-12-11T22:38:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741835_1011 (size=93) 2024-12-11T22:38:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741835_1011 (size=93) 2024-12-11T22:38:20,925 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/default/TestHBaseWalOnEC/aa641e31c2548f731509a955b2535d63/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T22:38:20,927 INFO [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 2024-12-11T22:38:20,927 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for aa641e31c2548f731509a955b2535d63: Waiting for close lock at 1733956700907Running coprocessor pre-close hooks at 1733956700907Disabling compacts and flushes for region at 1733956700907Disabling writes for close at 1733956700907Writing region close event to WAL at 1733956700916 (+9 ms)Running coprocessor post-close hooks at 1733956700926 (+10 ms)Closed at 1733956700927 (+1 ms) 2024-12-11T22:38:20,927 DEBUG [RS_CLOSE_REGION-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63. 
2024-12-11T22:38:20,929 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:20,937 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/info/5c983c4a7b2f46168600c75487c69fc9 is 153, key is TestHBaseWalOnEC,,1733956698961.aa641e31c2548f731509a955b2535d63./info:regioninfo/1733956699536/Put/seqid=0 2024-12-11T22:38:20,952 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:20,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741840_1016 (size=6637) 2024-12-11T22:38:20,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741840_1016 (size=6637) 2024-12-11T22:38:20,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741840_1016 (size=6637) 2024-12-11T22:38:20,955 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/info/5c983c4a7b2f46168600c75487c69fc9 2024-12-11T22:38:20,963 INFO [regionserver/b762025f20c5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:20,988 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/ns/ea368e0537c049b990a30b497efb2cd4 is 43, key is default/ns:d/1733956698799/Put/seqid=0 2024-12-11T22:38:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741841_1017 (size=5153) 2024-12-11T22:38:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741841_1017 (size=5153) 2024-12-11T22:38:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741841_1017 (size=5153) 2024-12-11T22:38:20,998 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/ns/ea368e0537c049b990a30b497efb2cd4 2024-12-11T22:38:21,022 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/table/6ef5b84eb8b3494cb35619d409161996 is 52, key is TestHBaseWalOnEC/table:state/1733956699566/Put/seqid=0 2024-12-11T22:38:21,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741842_1018 (size=5249) 2024-12-11T22:38:21,034 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741842_1018 (size=5249) 2024-12-11T22:38:21,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741842_1018 (size=5249) 2024-12-11T22:38:21,035 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/table/6ef5b84eb8b3494cb35619d409161996 2024-12-11T22:38:21,052 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/info/5c983c4a7b2f46168600c75487c69fc9 as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/info/5c983c4a7b2f46168600c75487c69fc9 2024-12-11T22:38:21,064 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/info/5c983c4a7b2f46168600c75487c69fc9, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T22:38:21,066 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/ns/ea368e0537c049b990a30b497efb2cd4 as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/ns/ea368e0537c049b990a30b497efb2cd4 2024-12-11T22:38:21,078 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/ns/ea368e0537c049b990a30b497efb2cd4, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T22:38:21,082 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/.tmp/table/6ef5b84eb8b3494cb35619d409161996 as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/table/6ef5b84eb8b3494cb35619d409161996 2024-12-11T22:38:21,095 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/table/6ef5b84eb8b3494cb35619d409161996, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T22:38:21,098 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 191ms, sequenceid=11, compaction requested=false 2024-12-11T22:38:21,106 DEBUG [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T22:38:21,108 INFO [RS:1;b762025f20c5:40551 {}] 
regionserver.HRegionServer(976): stopping server b762025f20c5,40551,1733956697287; all regions closed. 2024-12-11T22:38:21,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,109 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,109 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741834_1010 (size=1298) 2024-12-11T22:38:21,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741834_1010 (size=1298) 2024-12-11T22:38:21,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741834_1010 (size=1298) 2024-12-11T22:38:21,119 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T22:38:21,120 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T22:38:21,120 DEBUG [RS:1;b762025f20c5:40551 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs 2024-12-11T22:38:21,120 INFO [RS:1;b762025f20c5:40551 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b762025f20c5%2C40551%2C1733956697287:(num 1733956698289) 2024-12-11T22:38:21,120 DEBUG [RS:1;b762025f20c5:40551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:21,120 INFO [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T22:38:21,120 INFO [RS:1;b762025f20c5:40551 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:21,120 INFO [RS:1;b762025f20c5:40551 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:21,120 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733956700906Running coprocessor pre-close hooks at 1733956700906Disabling compacts and flushes for region at 1733956700906Disabling writes for close at 1733956700906Obtaining lock to block concurrent updates at 1733956700906Preparing flush snapshotting stores in 1588230740 at 1733956700906Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733956700907 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733956700912 (+5 ms)Flushing 1588230740/info: creating writer at 1733956700912Flushing 1588230740/info: appending metadata at 1733956700936 (+24 ms)Flushing 1588230740/info: closing flushed file at 1733956700936Flushing 1588230740/ns: creating writer at 1733956700969 (+33 ms)Flushing 1588230740/ns: appending metadata at 1733956700987 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1733956700987Flushing 1588230740/table: creating 
writer at 1733956701005 (+18 ms)Flushing 1588230740/table: appending metadata at 1733956701022 (+17 ms)Flushing 1588230740/table: closing flushed file at 1733956701022Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e23b9d8: reopening flushed file at 1733956701051 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c5c6153: reopening flushed file at 1733956701065 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7709f015: reopening flushed file at 1733956701078 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 191ms, sequenceid=11, compaction requested=false at 1733956701098 (+20 ms)Writing region close event to WAL at 1733956701107 (+9 ms)Running coprocessor post-close hooks at 1733956701119 (+12 ms)Closed at 1733956701120 (+1 ms) 2024-12-11T22:38:21,120 INFO [RS:1;b762025f20c5:40551 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:21,121 INFO [RS:1;b762025f20c5:40551 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T22:38:21,121 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T22:38:21,121 INFO [RS:1;b762025f20c5:40551 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T22:38:21,121 INFO [RS:1;b762025f20c5:40551 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T22:38:21,121 INFO [RS:1;b762025f20c5:40551 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:21,121 INFO [RS:1;b762025f20c5:40551 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40551 2024-12-11T22:38:21,121 DEBUG [RS_CLOSE_META-regionserver/b762025f20c5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T22:38:21,183 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T22:38:21,183 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T22:38:21,203 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T22:38:21,203 INFO [regionserver/b762025f20c5:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T22:38:21,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:21,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,40551,1733956697287 2024-12-11T22:38:21,260 INFO [RS:1;b762025f20c5:40551 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:21,261 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,40551,1733956697287] 2024-12-11T22:38:21,289 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,40551,1733956697287 already deleted, retry=false 2024-12-11T22:38:21,289 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,40551,1733956697287 expired; onlineServers=2 2024-12-11T22:38:21,306 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(976): stopping server b762025f20c5,45265,1733956697251; all regions closed. 
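Editor's note: the NodeDeleted and NodeChildrenChanged events above are how the active master learns that a region server's ephemeral znode under /hbase/rs has disappeared, after which RegionServerTracker processes the expiration. The sketch below illustrates that watch mechanism with the plain ZooKeeper client; the quorum address and znode path are copied from the log, and the rest is an illustrative standalone client, not HBase's RegionServerTracker.

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsNodeWatchDemo {
      public static void main(String[] args) throws Exception {
        // Connect to the same quorum address seen in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57561", 30_000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("ephemeral node deleted: " + event.getPath());
          }
        });
        // Register a watch on one region server's ephemeral znode; a NodeDeleted
        // event fires when that server's ZooKeeper session ends.
        zk.exists("/hbase/rs/b762025f20c5,40551,1733956697287", true);
        Thread.sleep(60_000);
        zk.close();
      }
    }
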
2024-12-11T22:38:21,311 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,311 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,312 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,315 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741836_1012 (size=2751) 2024-12-11T22:38:21,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741836_1012 (size=2751) 2024-12-11T22:38:21,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741836_1012 (size=2751) 2024-12-11T22:38:21,327 DEBUG [RS:2;b762025f20c5:39009 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs 2024-12-11T22:38:21,327 INFO [RS:2;b762025f20c5:39009 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b762025f20c5%2C39009%2C1733956697333:(num 1733956698308) 2024-12-11T22:38:21,327 DEBUG [RS:2;b762025f20c5:39009 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:21,327 INFO [RS:2;b762025f20c5:39009 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:21,328 DEBUG [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs 2024-12-11T22:38:21,328 INFO [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b762025f20c5%2C45265%2C1733956697251.meta:.meta(num 1733956698700) 2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T22:38:21,328 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:21,328 INFO [RS:2;b762025f20c5:39009 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39009 2024-12-11T22:38:21,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,329 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,329 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,329 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741833_1009 (size=93) 2024-12-11T22:38:21,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741833_1009 (size=93) 2024-12-11T22:38:21,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741833_1009 (size=93) 2024-12-11T22:38:21,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:21,340 INFO [RS:2;b762025f20c5:39009 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:21,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,39009,1733956697333 2024-12-11T22:38:21,343 DEBUG [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/oldWALs 2024-12-11T22:38:21,343 INFO [RS:0;b762025f20c5:45265 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog b762025f20c5%2C45265%2C1733956697251:(num 1733956698283) 2024-12-11T22:38:21,343 DEBUG [RS:0;b762025f20c5:45265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T22:38:21,343 INFO [RS:0;b762025f20c5:45265 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T22:38:21,344 INFO [RS:0;b762025f20c5:45265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:21,344 INFO [RS:0;b762025f20c5:45265 {}] hbase.ChoreService(370): Chore service for: regionserver/b762025f20c5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:21,344 INFO [RS:0;b762025f20c5:45265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:21,345 INFO [regionserver/b762025f20c5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T22:38:21,345 INFO [RS:0;b762025f20c5:45265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45265 2024-12-11T22:38:21,351 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,39009,1733956697333] 2024-12-11T22:38:21,355 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b762025f20c5,45265,1733956697251 2024-12-11T22:38:21,356 INFO [RS:0;b762025f20c5:45265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:21,356 ERROR [pool-324-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$366/0x00007f037c8f2698@5e2487c8 rejected from java.util.concurrent.ThreadPoolExecutor@1b260268[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-11T22:38:21,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T22:38:21,364 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,39009,1733956697333 already deleted, retry=false 2024-12-11T22:38:21,365 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,39009,1733956697333 expired; onlineServers=1 2024-12-11T22:38:21,372 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b762025f20c5,45265,1733956697251] 2024-12-11T22:38:21,380 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b762025f20c5,45265,1733956697251 already deleted, retry=false 2024-12-11T22:38:21,381 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b762025f20c5,45265,1733956697251 expired; onlineServers=0 2024-12-11T22:38:21,381 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b762025f20c5,45281,1733956697114' ***** 2024-12-11T22:38:21,381 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T22:38:21,381 INFO [M:0;b762025f20c5:45281 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T22:38:21,381 INFO [M:0;b762025f20c5:45281 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T22:38:21,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:21,381 INFO [RS:1;b762025f20c5:40551 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:21,381 DEBUG [M:0;b762025f20c5:45281 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T22:38:21,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40551-0x100cb7d5b3a0002, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:21,381 INFO [RS:1;b762025f20c5:40551 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,40551,1733956697287; zookeeper connection closed. 2024-12-11T22:38:21,381 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T22:38:21,381 DEBUG [M:0;b762025f20c5:45281 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T22:38:21,381 DEBUG [master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733956697885 {}] cleaner.HFileCleaner(306): Exit Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.large.0-1733956697885,5,FailOnTimeoutGroup] 2024-12-11T22:38:21,381 DEBUG [master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733956697885 {}] cleaner.HFileCleaner(306): Exit Thread[master/b762025f20c5:0:becomeActiveMaster-HFileCleaner.small.0-1733956697885,5,FailOnTimeoutGroup] 2024-12-11T22:38:21,381 INFO [M:0;b762025f20c5:45281 {}] hbase.ChoreService(370): Chore service for: master/b762025f20c5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T22:38:21,381 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@13f6ac29 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@13f6ac29 2024-12-11T22:38:21,381 INFO [M:0;b762025f20c5:45281 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T22:38:21,381 DEBUG [M:0;b762025f20c5:45281 {}] master.HMaster(1795): Stopping service threads 2024-12-11T22:38:21,382 INFO [M:0;b762025f20c5:45281 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T22:38:21,382 INFO [M:0;b762025f20c5:45281 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T22:38:21,382 INFO [M:0;b762025f20c5:45281 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T22:38:21,382 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
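Editor's note: the ERROR from pool-324-thread-1-EventThread above is a shutdown race rather than data loss: a late ZooKeeper event is delivered after ZKWatcher's internal executor has already been shut down, so the task submission is rejected. The underlying JDK behaviour can be reproduced in isolation; the sketch below is plain java.util.concurrent, not HBase code.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    public class RejectedAfterShutdownDemo {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.shutdown(); // analogous to the watcher's executor being stopped first
        try {
          pool.execute(() -> System.out.println("never runs"));
        } catch (RejectedExecutionException e) {
          // Same failure mode as the "Error while calling watcher" above: a late
          // event is submitted after the executor has been shut down.
          System.out.println("rejected: " + e);
        }
      }
    }
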
2024-12-11T22:38:21,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T22:38:21,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T22:38:21,389 DEBUG [M:0;b762025f20c5:45281 {}] zookeeper.ZKUtil(347): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T22:38:21,389 WARN [M:0;b762025f20c5:45281 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T22:38:21,390 INFO [M:0;b762025f20c5:45281 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/.lastflushedseqids 2024-12-11T22:38:21,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741843_1019 (size=127) 2024-12-11T22:38:21,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741843_1019 (size=127) 2024-12-11T22:38:21,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741843_1019 (size=127) 2024-12-11T22:38:21,421 INFO [M:0;b762025f20c5:45281 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T22:38:21,422 INFO [M:0;b762025f20c5:45281 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T22:38:21,422 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T22:38:21,422 INFO [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:21,422 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T22:38:21,422 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T22:38:21,422 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T22:38:21,422 INFO [M:0;b762025f20c5:45281 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.81 KB heapSize=34.10 KB 2024-12-11T22:38:21,448 INFO [RS:2;b762025f20c5:39009 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:21,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:21,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39009-0x100cb7d5b3a0003, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:21,448 INFO [RS:2;b762025f20c5:39009 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,39009,1733956697333; zookeeper connection closed. 2024-12-11T22:38:21,448 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2267e2ae {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2267e2ae 2024-12-11T22:38:21,460 DEBUG [M:0;b762025f20c5:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95e908722b424d3ab6d3cd46427a39eb is 82, key is hbase:meta,,1/info:regioninfo/1733956698762/Put/seqid=0 2024-12-11T22:38:21,474 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:21,474 INFO [RS:0;b762025f20c5:45265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:21,474 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45265-0x100cb7d5b3a0001, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:21,474 INFO [RS:0;b762025f20c5:45265 {}] regionserver.HRegionServer(1031): Exiting; stopping=b762025f20c5,45265,1733956697251; zookeeper connection closed. 
2024-12-11T22:38:21,480 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7ee7e3b9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7ee7e3b9 2024-12-11T22:38:21,480 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T22:38:21,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741844_1020 (size=5672) 2024-12-11T22:38:21,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741844_1020 (size=5672) 2024-12-11T22:38:21,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741844_1020 (size=5672) 2024-12-11T22:38:21,491 INFO [M:0;b762025f20c5:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95e908722b424d3ab6d3cd46427a39eb 2024-12-11T22:38:21,532 DEBUG [M:0;b762025f20c5:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78d07ab3f1cb49fab59c1a3ecf7c6acf is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733956699577/Put/seqid=0 2024-12-11T22:38:21,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741845_1021 (size=6437) 2024-12-11T22:38:21,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741845_1021 (size=6437) 2024-12-11T22:38:21,553 INFO [M:0;b762025f20c5:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78d07ab3f1cb49fab59c1a3ecf7c6acf 2024-12-11T22:38:21,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741845_1021 (size=6437) 2024-12-11T22:38:21,581 DEBUG [M:0;b762025f20c5:45281 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d42cb11a3dc6474bbadd74a24201da83 is 69, key is b762025f20c5,39009,1733956697333/rs:state/1733956698084/Put/seqid=0 2024-12-11T22:38:21,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741846_1022 (size=5294) 2024-12-11T22:38:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741846_1022 (size=5294) 2024-12-11T22:38:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741846_1022 (size=5294) 2024-12-11T22:38:21,624 INFO [M:0;b762025f20c5:45281 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at 
sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d42cb11a3dc6474bbadd74a24201da83 2024-12-11T22:38:21,652 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95e908722b424d3ab6d3cd46427a39eb as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/95e908722b424d3ab6d3cd46427a39eb 2024-12-11T22:38:21,670 INFO [M:0;b762025f20c5:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/95e908722b424d3ab6d3cd46427a39eb, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T22:38:21,680 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78d07ab3f1cb49fab59c1a3ecf7c6acf as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/78d07ab3f1cb49fab59c1a3ecf7c6acf 2024-12-11T22:38:21,701 INFO [M:0;b762025f20c5:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/78d07ab3f1cb49fab59c1a3ecf7c6acf, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T22:38:21,707 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d42cb11a3dc6474bbadd74a24201da83 as hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d42cb11a3dc6474bbadd74a24201da83 2024-12-11T22:38:21,718 INFO [M:0;b762025f20c5:45281 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34987/user/jenkins/test-data/4ec1b002-4202-666f-5361-b4ff6aa51d16/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d42cb11a3dc6474bbadd74a24201da83, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T22:38:21,724 INFO [M:0;b762025f20c5:45281 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 302ms, sequenceid=72, compaction requested=false 2024-12-11T22:38:21,736 INFO [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
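Editor's note: the "Committing ... .tmp/... as ..." lines show the flush commit pattern: each flushed HFile is written under the region's .tmp directory and only becomes visible once it is moved into the column-family directory. A rough sketch of that move using the Hadoop FileSystem API follows; the paths are shortened from the log, and this is only the general idea, not HBase's HRegionFileSystem code, which also validates and reserves file names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Shortened paths: the flushed file sits under .tmp/ until committed.
        Path tmp = new Path("/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/95e908722b424d3ab6d3cd46427a39eb");
        Path dst = new Path("/master/store/1595e783b53d99cd5eef43b6debb2682/info/95e908722b424d3ab6d3cd46427a39eb");
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("commit of " + tmp + " failed");
        }
      }
    }
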
2024-12-11T22:38:21,736 DEBUG [M:0;b762025f20c5:45281 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733956701422Disabling compacts and flushes for region at 1733956701422Disabling writes for close at 1733956701422Obtaining lock to block concurrent updates at 1733956701422Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733956701422Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27450, getHeapSize=34856, getOffHeapSize=0, getCellsCount=85 at 1733956701423 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733956701424 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733956701424Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733956701460 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733956701460Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733956701509 (+49 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733956701531 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733956701531Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733956701562 (+31 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733956701581 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733956701581Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56fcf6a9: reopening flushed file at 1733956701642 (+61 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c67ef73: reopening flushed file at 1733956701670 (+28 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5221843e: reopening flushed file at 1733956701701 (+31 ms)Finished flush of dataSize ~26.81 KB/27450, heapSize ~33.80 KB/34616, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 302ms, sequenceid=72, compaction requested=false at 1733956701724 (+23 ms)Writing region close event to WAL at 1733956701736 (+12 ms)Closed at 1733956701736 2024-12-11T22:38:21,736 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,737 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,737 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,739 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,739 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-11T22:38:21,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39871 is added to blk_1073741830_1006 (size=32653) 2024-12-11T22:38:21,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45739 is added to blk_1073741830_1006 (size=32653) 2024-12-11T22:38:21,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34413 is added to blk_1073741830_1006 (size=32653) 2024-12-11T22:38:22,147 INFO [M:0;b762025f20c5:45281 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-11T22:38:22,147 INFO [M:0;b762025f20c5:45281 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45281 2024-12-11T22:38:22,148 INFO [M:0;b762025f20c5:45281 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T22:38:22,148 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T22:38:22,263 INFO [M:0;b762025f20c5:45281 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T22:38:22,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:22,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45281-0x100cb7d5b3a0000, quorum=127.0.0.1:57561, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T22:38:22,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@309408a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:22,337 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e780d74{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T22:38:22,337 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T22:38:22,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a8aabef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T22:38:22,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c3e0b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,STOPPED} 2024-12-11T22:38:22,355 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T22:38:22,355 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T22:38:22,356 WARN [BP-2052876916-172.17.0.2-1733956694700 heartbeating to localhost/127.0.0.1:34987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T22:38:22,356 WARN [BP-2052876916-172.17.0.2-1733956694700 heartbeating to localhost/127.0.0.1:34987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2052876916-172.17.0.2-1733956694700 (Datanode Uuid f54c79c1-0139-46d6-b1f5-cdaeee405238) service to localhost/127.0.0.1:34987 2024-12-11T22:38:22,358 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data6/current/BP-2052876916-172.17.0.2-1733956694700 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:22,359 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data5/current/BP-2052876916-172.17.0.2-1733956694700 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T22:38:22,361 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T22:38:22,383 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@783cf8b9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T22:38:22,387 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b98fb40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T22:38:22,387 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T22:38:22,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c414977{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T22:38:22,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47e3d455{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,STOPPED} 2024-12-11T22:38:22,405 WARN [BP-2052876916-172.17.0.2-1733956694700 heartbeating to localhost/127.0.0.1:34987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T22:38:22,405 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T22:38:22,405 WARN [BP-2052876916-172.17.0.2-1733956694700 heartbeating to localhost/127.0.0.1:34987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2052876916-172.17.0.2-1733956694700 (Datanode Uuid cab13a40-ce0d-452c-b2fb-43ce908deff8) service to localhost/127.0.0.1:34987
2024-12-11T22:38:22,405 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T22:38:22,406 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data3/current/BP-2052876916-172.17.0.2-1733956694700 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T22:38:22,406 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T22:38:22,407 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data4/current/BP-2052876916-172.17.0.2-1733956694700 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T22:38:22,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@787fca0d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T22:38:22,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e93ef7e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T22:38:22,428 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T22:38:22,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d0b4a63{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T22:38:22,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22ce3607{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,STOPPED}
2024-12-11T22:38:22,440 WARN [BP-2052876916-172.17.0.2-1733956694700 heartbeating to localhost/127.0.0.1:34987 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T22:38:22,440 WARN [BP-2052876916-172.17.0.2-1733956694700 heartbeating to localhost/127.0.0.1:34987 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2052876916-172.17.0.2-1733956694700 (Datanode Uuid b9ea5724-f543-4ad3-840b-49b6c8a6c5b8) service to localhost/127.0.0.1:34987
2024-12-11T22:38:22,441 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data1/current/BP-2052876916-172.17.0.2-1733956694700 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T22:38:22,442 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/cluster_188358d7-6c2e-a165-82b1-a2dac021c641/data/data2/current/BP-2052876916-172.17.0.2-1733956694700 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T22:38:22,442 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T22:38:22,443 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-11T22:38:22,443 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T22:38:22,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64064f3d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-11T22:38:22,451 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@485e3231{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T22:38:22,451 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T22:38:22,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@94f688b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T22:38:22,452 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7288ff9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f2c24a00-ba08-830c-ff53-2baac8f3ac04/hadoop.log.dir/,STOPPED}
2024-12-11T22:38:22,469 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-11T22:38:22,506 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-11T22:38:22,516 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 85) - Thread LEAK? -, OpenFileDescriptor=515 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=932 (was 972), ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=5358 (was 6041)
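Note on the teardown above: the "Stopping server", "Shutdown MiniZK cluster", "Minicluster is down", and final ResourceChecker lines are the ordinary output of HBaseTestingUtil dismantling its minicluster (HDFS datanodes and their Jetty web contexts, MiniZooKeeperCluster, then master/regionserver) at the end of regionserver.wal.TestHBaseWalOnEC#testReadWrite[1]. The following is only a minimal sketch of the test lifecycle that produces this kind of log; it assumes the HBaseTestingUtil API mirrors the older HBaseTestingUtility (startMiniCluster/shutdownMiniCluster) and that JUnit 4 is in use. Class and test names here are hypothetical, not taken from TestHBaseWalOnEC itself.

// Hedged sketch, not the actual TestHBaseWalOnEC source.
// Assumes HBaseTestingUtil exposes startMiniCluster()/shutdownMiniCluster()
// as HBaseTestingUtility does; verify against the branch that produced this log.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // Brings up MiniDFSCluster, MiniZooKeeperCluster, a master and region servers,
    // i.e. the processes whose shutdown messages appear in the log above.
    UTIL.startMiniCluster();
  }

  @Test
  public void readWrite() throws Exception {
    // Test body elided; it would exercise WAL reads/writes against the minicluster.
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Produces the teardown sequence seen above; afterwards ResourceChecker
    // logs thread/file-descriptor counts before vs. after the test to flag leaks.
    UTIL.shutdownMiniCluster();
  }
}

In the ResourceChecker summary, "Thread=148 (was 85) - Thread LEAK? -" means the thread count rose from 85 before the test to 148 after it, which is why the checker flags a possible leak; the same reading applies to the OpenFileDescriptor and ProcessCount deltas.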